From 129328e842833f5c2189883bfc2955e5b53e2bbe Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 20 Sep 2017 15:59:35 -0500 Subject: [PATCH 01/52] MVP of working Nomad Secret Backend --- builtin/logical/nomad/backend.go | 37 ++++++ builtin/logical/nomad/client.go | 28 +++++ builtin/logical/nomad/path_config.go | 104 ++++++++++++++++ builtin/logical/nomad/path_roles.go | 163 ++++++++++++++++++++++++++ builtin/logical/nomad/path_token.go | 80 +++++++++++++ builtin/logical/nomad/secret_token.go | 59 ++++++++++ cli/commands.go | 2 + 7 files changed, 473 insertions(+) create mode 100644 builtin/logical/nomad/backend.go create mode 100644 builtin/logical/nomad/client.go create mode 100644 builtin/logical/nomad/path_config.go create mode 100644 builtin/logical/nomad/path_roles.go create mode 100644 builtin/logical/nomad/path_token.go create mode 100644 builtin/logical/nomad/secret_token.go diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go new file mode 100644 index 000000000..99386d095 --- /dev/null +++ b/builtin/logical/nomad/backend.go @@ -0,0 +1,37 @@ +package nomad + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Paths: []*framework.Path{ + pathConfigAccess(), + pathListRoles(&b), + pathRoles(), + pathToken(&b), + }, + + Secrets: []*framework.Secret{ + secretToken(&b), + }, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend +} diff --git a/builtin/logical/nomad/client.go b/builtin/logical/nomad/client.go new file mode 100644 index 000000000..2101d31dc --- /dev/null +++ b/builtin/logical/nomad/client.go @@ -0,0 +1,28 @@ +package nomad + +import ( + "fmt" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/logical" +) + +func client(s logical.Storage) (*api.Client, error, error) { + conf, userErr, intErr := readConfigAccess(s) + if intErr != nil { + return nil, nil, intErr + } + if userErr != nil { + return nil, userErr, nil + } + if conf == nil { + return nil, nil, fmt.Errorf("no error received but no configuration found") + } + + nomadConf := api.DefaultConfig() + nomadConf.Address = conf.Address + nomadConf.SecretID = conf.Token + + client, err := api.NewClient(nomadConf) + return client, nil, err +} diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go new file mode 100644 index 000000000..d9e6dc128 --- /dev/null +++ b/builtin/logical/nomad/path_config.go @@ -0,0 +1,104 @@ +package nomad + +import ( + "fmt" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathConfigAccess() *framework.Path { + return &framework.Path{ + Pattern: "config/access", + Fields: map[string]*framework.FieldSchema{ + "address": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Nomad server address", + }, + + "scheme": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "URI scheme for the Nomad address", + + // https would be a better default but Consul on its own + // defaults to HTTP access, and when HTTPS is enabled it + // disables HTTP, so there isn't really any harm done here. 
+ Default: "http", + }, + + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token for API calls", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: pathConfigAccessRead, + logical.UpdateOperation: pathConfigAccessWrite, + }, + } +} + +func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { + entry, err := storage.Get("config/access") + if err != nil { + return nil, nil, err + } + if entry == nil { + return nil, fmt.Errorf( + "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint"), + nil + } + + conf := &accessConfig{} + if err := entry.DecodeJSON(conf); err != nil { + return nil, nil, fmt.Errorf("error reading nomad access configuration: %s", err) + } + + return conf, nil, nil +} + +func pathConfigAccessRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + conf, userErr, intErr := readConfigAccess(req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + if conf == nil { + return nil, fmt.Errorf("no user error reported but nomad access configuration not found") + } + + return &logical.Response{ + Data: map[string]interface{}{ + "address": conf.Address, + "scheme": conf.Scheme, + }, + }, nil +} + +func pathConfigAccessWrite( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/access", accessConfig{ + Address: data.Get("address").(string), + Scheme: data.Get("scheme").(string), + Token: data.Get("token").(string), + }) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil +} + +type accessConfig struct { + Address string `json:"address"` + Scheme string `json:"scheme"` + Token string `json:"token"` +} diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go new file mode 100644 index 000000000..69846fb73 --- /dev/null +++ b/builtin/logical/nomad/path_roles.go @@ -0,0 +1,163 @@ +package nomad + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + } +} + +func pathRoles() *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role", + }, + + "policy": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Policy name as previously created in Nomad. Required", + }, + + "token_type": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "client", + Description: `Which type of token to create: 'client' +or 'management'. If a 'management' token, +the "policy" parameter is not required. 
+Defaults to 'client'.`, + }, + + "lease": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Lease time of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: pathRolesRead, + logical.UpdateOperation: pathRolesWrite, + logical.DeleteOperation: pathRolesDelete, + }, + } +} + +func (b *backend) pathRoleList( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List("policy/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func pathRolesRead( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + entry, err := req.Storage.Get("policy/" + name) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if result.TokenType == "" { + result.TokenType = "client" + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "lease": result.Lease.String(), + "token_type": result.TokenType, + }, + } + if result.Policy != "" { + resp.Data["policy"] = result.Policy + } + return resp, nil +} + +func pathRolesWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + tokenType := d.Get("token_type").(string) + + switch tokenType { + case "client": + case "management": + default: + return logical.ErrorResponse( + "token_type must be \"client\" or \"management\""), nil + } + + name := d.Get("name").(string) + policy := d.Get("policy").(string) + var err error + if tokenType != "management" { + if policy == "" { + return logical.ErrorResponse( + "policy cannot be empty when not using management tokens"), nil + } + } + + var lease time.Duration + leaseParam := d.Get("lease").(string) + if leaseParam != "" { + lease, err = time.ParseDuration(leaseParam) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "error parsing given lease of %s: %s", leaseParam, err)), nil + } + } + + entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{ + Policy: policy, + Lease: lease, + TokenType: tokenType, + }) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil +} + +func pathRolesDelete( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if err := req.Storage.Delete("policy/" + name); err != nil { + return nil, err + } + return nil, nil +} + +type roleConfig struct { + Policy string `json:"policy"` + Lease time.Duration `json:"lease"` + TokenType string `json:"token_type"` +} diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go new file mode 100644 index 000000000..e83d700b2 --- /dev/null +++ b/builtin/logical/nomad/path_token.go @@ -0,0 +1,80 @@ +package nomad + +import ( + "fmt" + "time" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathToken(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathTokenRead, + }, + } +} + 
+func (b *backend) pathTokenRead( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + entry, err := req.Storage.Get("policy/" + name) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %s", err) + } + if entry == nil { + return logical.ErrorResponse(fmt.Sprintf("Role '%s' not found", name)), nil + } + + var result roleConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if result.TokenType == "" { + result.TokenType = "client" + } + + // Get the nomad client + c, userErr, intErr := client(req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + + // Generate a name for the token + tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano()) + + // Create it + token, _, err := c.ACLTokens().Create(&api.ACLToken{ + Name: tokenName, + Type: result.TokenType, + Policies: []string{result.Policy}, + }, nil) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // Use the helper to create the secret + s := b.Secret(SecretTokenType).Response(map[string]interface{}{ + "token": token, + }, map[string]interface{}{ + "token": token, + }) + s.Secret.TTL = result.Lease + + return s, nil +} diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go new file mode 100644 index 000000000..4bca29051 --- /dev/null +++ b/builtin/logical/nomad/secret_token.go @@ -0,0 +1,59 @@ +package nomad + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +const ( + SecretTokenType = "token" +) + +func secretToken(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretTokenType, + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Request token", + }, + }, + + Renew: b.secretTokenRenew, + Revoke: secretTokenRevoke, + } +} + +func (b *backend) secretTokenRenew( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + + return framework.LeaseExtend(0, 0, b.System())(req, d) +} + +func secretTokenRevoke( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + c, userErr, intErr := client(req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + // Returning logical.ErrorResponse from revocation function is risky + return nil, userErr + } + + tokenRaw, ok := req.Secret.InternalData["token"] + if !ok { + // We return nil here because this is a pre-0.5.3 problem and there is + // nothing we can do about it. We already can't revoke the lease + // properly if it has been renewed and this is documented pre-0.5.3 + // behavior with a security bulletin about it. 
+ return nil, nil + } + + _, err := c.ACLTokens().Delete(tokenRaw.(string), nil) + if err != nil { + return nil, err + } + + return nil, nil +} diff --git a/cli/commands.go b/cli/commands.go index 22c8640a9..83e50b6ed 100644 --- a/cli/commands.go +++ b/cli/commands.go @@ -45,6 +45,7 @@ import ( "github.com/hashicorp/vault/builtin/logical/mongodb" "github.com/hashicorp/vault/builtin/logical/mssql" "github.com/hashicorp/vault/builtin/logical/mysql" + "github.com/hashicorp/vault/builtin/logical/nomad" "github.com/hashicorp/vault/builtin/logical/pki" "github.com/hashicorp/vault/builtin/logical/postgresql" "github.com/hashicorp/vault/builtin/logical/rabbitmq" @@ -107,6 +108,7 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory { LogicalBackends: map[string]logical.Factory{ "aws": aws.Factory, "consul": consul.Factory, + "nomad": nomad.Factory, "postgresql": postgresql.Factory, "cassandra": cassandra.Factory, "pki": pki.Factory, From 420b46fa08323ed63365243c61c99b511b23d70a Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 20 Sep 2017 17:14:35 -0500 Subject: [PATCH 02/52] Fixing data model --- builtin/logical/nomad/path_roles.go | 10 +++++----- builtin/logical/nomad/path_token.go | 8 +++++--- builtin/logical/nomad/secret_token.go | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 69846fb73..79836c0ac 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -28,7 +28,7 @@ func pathRoles() *framework.Path { }, "policy": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Policy name as previously created in Nomad. Required", }, @@ -93,7 +93,7 @@ func pathRolesRead( "token_type": result.TokenType, }, } - if result.Policy != "" { + if len(result.Policy) != 0 { resp.Data["policy"] = result.Policy } return resp, nil @@ -112,10 +112,10 @@ func pathRolesWrite( } name := d.Get("name").(string) - policy := d.Get("policy").(string) + policy := d.Get("policy").([]string) var err error if tokenType != "management" { - if policy == "" { + if len(policy) == 0 { return logical.ErrorResponse( "policy cannot be empty when not using management tokens"), nil } @@ -157,7 +157,7 @@ func pathRolesDelete( } type roleConfig struct { - Policy string `json:"policy"` + Policy []string `json:"policy"` Lease time.Duration `json:"lease"` TokenType string `json:"token_type"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index e83d700b2..f837df412 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -62,7 +62,7 @@ func (b *backend) pathTokenRead( token, _, err := c.ACLTokens().Create(&api.ACLToken{ Name: tokenName, Type: result.TokenType, - Policies: []string{result.Policy}, + Policies: result.Policy, }, nil) if err != nil { return logical.ErrorResponse(err.Error()), nil @@ -70,9 +70,11 @@ func (b *backend) pathTokenRead( // Use the helper to create the secret s := b.Secret(SecretTokenType).Response(map[string]interface{}{ - "token": token, + "secret_id": token.SecretID, + "accessor_id": token.AccessorID, }, map[string]interface{}{ - "token": token, + "secret_id": token.SecretID, + "accessor_id": token.AccessorID, }) s.Secret.TTL = result.Lease diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 4bca29051..ad02b16fe 100644 --- a/builtin/logical/nomad/secret_token.go +++ 
b/builtin/logical/nomad/secret_token.go
@@ -41,7 +41,7 @@ func secretTokenRevoke(
 		return nil, userErr
 	}
 
-	tokenRaw, ok := req.Secret.InternalData["token"]
+	tokenRaw, ok := req.Secret.InternalData["accessor_id"]
 	if !ok {
 		// We return nil here because this is a pre-0.5.3 problem and there is
 		// nothing we can do about it. We already can't revoke the lease

From 5178e5f5f2461a3814b85d96f7a10443b8d8072b Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Wed, 20 Sep 2017 17:31:28 -0500
Subject: [PATCH 03/52] Adding Nomad secret backend documentation

---
 .../source/docs/secrets/nomad/index.html.md   | 109 ++++++++++++++++++
 1 file changed, 109 insertions(+)
 create mode 100644 website/source/docs/secrets/nomad/index.html.md

diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md
new file mode 100644
index 000000000..d5b87d107
--- /dev/null
+++ b/website/source/docs/secrets/nomad/index.html.md
@@ -0,0 +1,109 @@
+---
+layout: "docs"
+page_title: "Nomad Secret Backend"
+sidebar_current: "docs-secrets-nomad"
+description: |-
+  The Nomad secret backend for Vault generates tokens for Nomad dynamically.
+---
+
+# Nomad Secret Backend
+
+Name: `nomad`
+
+The Nomad secret backend for Vault generates
+[Nomad](https://www.nomadproject.io)
+API tokens dynamically based on pre-existing Nomad ACL policies.
+
+This page will show a quick start for this backend. For detailed documentation
+on every path, use `vault path-help` after mounting the backend.
+
+~> **Version information** ACLs are only available on Nomad 0.7.0 and above,
+which is currently in beta.
+
+## Quick Start
+
+The first step to using the nomad backend is to mount it.
+Unlike the `generic` backend, the `nomad` backend is not mounted by default.
+
+```
+$ vault mount nomad
+Successfully mounted 'nomad' at 'nomad'!
+```
+For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap
+process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this
+is discouraged for production deployments.
+```
+$ nomad acl bootstrap
+Accessor ID  = 95a0ee55-eaa6-2c0a-a900-ed94c156754e
+Secret ID    = c25b6ca0-ea4e-000f-807a-fd03fcab6e3c
+Name         = Bootstrap Token
+Type         = management
+Global       = true
+Policies     = n/a
+Create Time  = 2017-09-20 19:40:36.527512364 +0000 UTC
+Create Index = 7
+Modify Index = 7
+```
+The suggested pattern is to generate a token specifically for Vault, following the
+[Nomad ACL guide](https://www.nomadproject.io/guides/acl.html).
+
+Next, we must configure Vault to know how to contact Nomad.
+This is done by writing the access information:
+
+```
+$ vault write nomad/config/access \
+    address=http://127.0.0.1:4646 \
+    token=adf4238a-882b-9ddc-4a9d-5b6758e4159e
+Success! Data written to: nomad/config/access
+```
+
+In this case, we've configured Vault to connect to Nomad
+on the default port with the loopback address. We've also provided
+an ACL token to use with the `token` parameter. Vault must have a management
+type token so that it can create and revoke ACL tokens.
+
+The next step is to configure a role. A role is a logical name that maps
+to a set of policy names used to generate those credentials. For example, let's create
+a "monitoring" role that maps to a "readonly" policy:
+
+```
+$ vault write nomad/roles/monitoring policy=readonly
+Success! Data written to: nomad/roles/monitoring
+```
+
+The backend expects either a single policy name or a comma-separated list of policy names.
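+
+For example, assuming a second policy named "logging" also exists in Nomad, a
+role mapping to both policies could be written as:
+
+```
+$ vault write nomad/roles/monitoring policy=readonly,logging
+Success! Data written to: nomad/roles/monitoring
+```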
+
+To generate a new Nomad ACL token, we simply read from that role:
+
+```
+$ vault read nomad/creds/monitoring
+Key              Value
+---              -----
+lease_id         nomad/creds/monitoring/78ec3ef3-c806-1022-4aa8-1dbae39c760c
+lease_duration   768h0m0s
+lease_renewable  true
+accessor_id      a715994d-f5fd-1194-73df-ae9dad616307
+secret_id        b31fb56c-0936-5428-8c5f-ed010431aba9
+```
+
+Here we can see that Vault has generated a new Nomad ACL token for us.
+We can test this token out by reading it in Nomad (by its accessor):
+
+```
+$ nomad acl token info a715994d-f5fd-1194-73df-ae9dad616307
+Accessor ID  = a715994d-f5fd-1194-73df-ae9dad616307
+Secret ID    = b31fb56c-0936-5428-8c5f-ed010431aba9
+Name         = Vault example root 1505945527022465593
+Type         = client
+Global       = false
+Policies     = [readonly]
+Create Time  = 2017-09-20 22:12:07.023455379 +0000 UTC
+Create Index = 138
+Modify Index = 138
+```
+
+## API
+
+The Nomad secret backend has a full HTTP API. Please see the
+[Nomad secret backend API](/api/secret/nomad/index.html) for more
+details.

From 2b4561dccb17ef97edbe8c1aa877f644ee0a4990 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Thu, 21 Sep 2017 09:18:35 -0500
Subject: [PATCH 04/52] Adding Nomad Secret Backend API documentation

---
 website/source/api/secret/nomad/index.html.md | 239 ++++++++++++++++++
 1 file changed, 239 insertions(+)
 create mode 100644 website/source/api/secret/nomad/index.html.md

diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md
new file mode 100644
index 000000000..236e3e7e8
--- /dev/null
+++ b/website/source/api/secret/nomad/index.html.md
@@ -0,0 +1,239 @@
+---
+layout: "api"
+page_title: "Nomad Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-nomad"
+description: |-
+  This is the API documentation for the Vault Nomad secret backend.
+---
+
+# Nomad Secret Backend HTTP API
+
+This is the API documentation for the Vault Nomad secret backend. For general
+information about the usage and operation of the Nomad backend, please see the
+[Vault Nomad backend documentation](/docs/secrets/nomad/index.html).
+
+This documentation assumes the Nomad backend is mounted at the `/nomad` path
+in Vault. Since it is possible to mount secret backends at any location, please
+update your API calls accordingly.
+
+## Configure Access
+
+This endpoint configures the access information for Nomad. This access
+information is used so that Vault can communicate with Nomad and generate
+Nomad tokens.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/nomad/config/access`       | `204 (empty body)`     |
+
+### Parameters
+
+- `address` `(string: <required>)` – Specifies the address of the Nomad
+  instance, provided as `"protocol://host:port"` like `"http://127.0.0.1:4646"`.
+
+- `token` `(string: <required>)` – Specifies the Nomad management token to use.
+
+### Sample Payload
+
+```json
+{
+  "address": "http://127.0.0.1:4646",
+  "token": "adha..."
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --request POST \
+    --header "X-Vault-Token: ..." \
+    --data @payload.json \
+    https://vault.rocks/v1/nomad/config/access
+```
+
+## Create/Update Role
+
+This endpoint creates or updates the Nomad role definition in Vault. If the
+role does not exist, it will be created. If the role already exists, it will
+receive updated attributes.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `POST`   | `/nomad/roles/:name`         | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create.
+  This is part of the request URL.
+
+- `lease` `(string: "")` – Specifies the lease for this role. This is provided
+  as a string duration with a time suffix like `"30s"` or `"1h"`. If not
+  provided, the default Vault lease is used.
+
+- `policy` `(string: <required>)` – Comma-separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad.
+
+- `token_type` `(string: "client")` - Specifies the type of token to create when
+  using this role. Valid values are `"client"` or `"management"`.
+
+### Sample Payload
+
+To create a client token with a custom policy:
+
+```json
+{
+  "policy": "readonly"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --request POST \
+    --header "X-Vault-Token: ..." \
+    --data @payload.json \
+    https://vault.rocks/v1/nomad/roles/monitoring
+```
+
+## Read Role
+
+This endpoint queries for information about a Nomad role with the given name.
+If no role exists with that name, a 404 is returned.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/nomad/roles/:name`         | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to query. This
+  is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    https://vault.rocks/v1/nomad/roles/monitoring
+```
+
+### Sample Response
+
+```json
+{
+  "auth": null,
+  "data": {
+    "lease": "0s",
+    "policy": [
+      "example"
+    ],
+    "token_type": "client"
+  },
+  "lease_duration": 0,
+  "lease_id": "",
+  "renewable": false,
+  "request_id": "f4c7ee18-72aa-3b20-a910-93b6274a9dc0",
+  "warnings": null,
+  "wrap_info": null
+}
+```
+
+## List Roles
+
+This endpoint lists all existing roles in the backend.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST`   | `/nomad/roles`               | `200 application/json` |
+| `GET`    | `/nomad/roles?list=true`     | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request LIST \
+    https://vault.rocks/v1/nomad/roles
+```
+
+### Sample Response
+
+```json
+{
+  "auth": null,
+  "data": {
+    "keys": [
+      "example"
+    ]
+  },
+  "lease_duration": 0,
+  "lease_id": "",
+  "renewable": false,
+  "request_id": "d7bb167b-81c5-9606-c214-b34fcda45634",
+  "warnings": null,
+  "wrap_info": null
+}
+```
+
+## Delete Role
+
+This endpoint deletes a Nomad role with the given name. Even if the role does
+not exist, this endpoint will still return a successful response.
+
+| Method   | Path                         | Produces               |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/nomad/roles/:name`         | `204 (empty body)`     |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+  is part of the request URL.
+
+### Sample Request
+
+```
+$ curl \
+    --request DELETE \
+    --header "X-Vault-Token: ..." \
+    https://vault.rocks/v1/nomad/roles/example-role
+```
+
+## Generate Credential
+
+This endpoint generates a dynamic Nomad token based on the given role
+definition.
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/nomad/creds/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of an existing role against + which to create this Nomad token. This is part of the request URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/nomad/creds/example +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "accessor_id": "c834ba40-8d84-b0c1-c084-3a31d3383c03", + "secret_id": "65af6f07-7f57-bb24-cdae-a27f86a894ce" + }, + "lease_duration": 2764800, + "lease_id": "nomad/creds/example/c2686da3-2431-b6d6-7bbf-c5b9496dd6d7", + "renewable": true, + "request_id": "37a06ca1-8a1d-7f17-bda8-4661289c392b", + "warnings": null, + "wrap_info": null +} +``` From ec972939c238652190cf1c7bc60afcdbe0358e7e Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 28 Sep 2017 21:44:30 +0100 Subject: [PATCH 05/52] Added tests --- builtin/logical/nomad/backend_test.go | 225 ++++++++++++++++++++++++++ 1 file changed, 225 insertions(+) create mode 100644 builtin/logical/nomad/backend_test.go diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go new file mode 100644 index 000000000..a234b3b39 --- /dev/null +++ b/builtin/logical/nomad/backend_test.go @@ -0,0 +1,225 @@ +package nomad + +import ( + "fmt" + "log" + "os" + "reflect" + "testing" + "time" + + nomadapi "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/logical" + "github.com/mitchellh/mapstructure" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, nomadToken string) { + nomadToken = os.Getenv("NOMAD_TOKEN") + + retAddress = os.Getenv("NOMAD_ADDR") + + if retAddress != "" { + return func() {}, retAddress, nomadToken + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + dockerOptions := &dockertest.RunOptions{ + Repository: "djenriquez/nomad", + Tag: "v0.7.0-beta1", + Cmd: []string{"agent", "-dev"}, + } + resource, err := pool.RunWithOptions(dockerOptions) + if err != nil { + t.Fatalf("Could not start local Nomad docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retAddress = fmt.Sprintf("http://localhost:%s/", resource.GetPort("4646/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + nomadapiConfig := nomadapi.DefaultConfig() + nomadapiConfig.Address = retAddress + nomad, err := nomadapi.NewClient(nomadapiConfig) + if err != nil { + return err + } + aclbootstrap, _, err := nomad.ACLTokens().Bootstrap(nil) + nomadToken = aclbootstrap.SecretID + policy := &nomadapi.ACLPolicy{ + Name: "test", + Description: "test", + Rules: `namespace "default" { + policy = "read" + } + `, + } + _, err = nomad.ACLPolicies().Upsert(policy, nil) + if err != nil { + t.Fatal(err) + } + return err + }); err != nil { + cleanup() + t.Fatalf("Could not connect to docker: %s", err) + } + return cleanup, retAddress, nomadToken +} + +func TestBackend_config_access(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() + + connData := 
map[string]interface{}{ + "address": connURL, + "token": connToken, + } + + confReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/access", + Storage: config.StorageView, + Data: connData, + } + + resp, err := b.HandleRequest(confReq) + if err != nil || (resp != nil && resp.IsError()) || resp != nil { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + confReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(confReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + expected := map[string]interface{}{ + "address": connData["address"].(string), + "scheme": "http", + } + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + if resp.Data["token"] != nil { + t.Fatalf("token should not be set in the response") + } +} + +func TestBackend_renew_revoke(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() + + connData := map[string]interface{}{ + "address": connURL, + "token": connToken, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + req.Path = "roles/test" + req.Data = map[string]interface{}{ + "policy": []string{"policy"}, + "lease": "6h", + } + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.IssueTime = time.Now() + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"SecretID"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + log.Printf("[WARN] Generated token: %s", d.Token) + + // Build a client and verify that the credentials work + nomadapiConfig := nomadapi.DefaultConfig() + nomadapiConfig.Address = connData["address"].(string) + nomadapiConfig.SecretID = d.Token + client, err := nomadapi.NewClient(nomadapiConfig) + if err != nil { + t.Fatal(err) + } + + log.Printf("[WARN] Verifying that the generated token works...") + _, err = client.Status().Leader, nil + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.RenewOperation + req.Secret = generatedSecret + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + + req.Operation = logical.RevokeOperation + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + log.Printf("[WARN] Verifying that the generated token does not work...") + _, err = client.Status().Leader, nil + if err == nil { + t.Fatal("expected error") + } +} From 9a011781eca779544c4a013d91223c32eb34d71f Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 28 Sep 2017 23:57:48 +0100 Subject: [PATCH 06/52] Adding Global tokens to the data model --- builtin/logical/nomad/path_roles.go | 9 +++++++++ builtin/logical/nomad/path_token.go | 1 + 2 files changed, 10 insertions(+) diff --git 
a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 79836c0ac..53fb7119d 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -32,6 +32,11 @@ func pathRoles() *framework.Path { Description: "Policy name as previously created in Nomad. Required", }, + "global": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: "Policy name as previously created in Nomad. Required", + }, + "token_type": &framework.FieldSchema{ Type: framework.TypeString, Default: "client", @@ -91,6 +96,7 @@ func pathRolesRead( Data: map[string]interface{}{ "lease": result.Lease.String(), "token_type": result.TokenType, + "global": result.Global, }, } if len(result.Policy) != 0 { @@ -112,6 +118,7 @@ func pathRolesWrite( } name := d.Get("name").(string) + global := d.Get("global").(bool) policy := d.Get("policy").([]string) var err error if tokenType != "management" { @@ -135,6 +142,7 @@ func pathRolesWrite( Policy: policy, Lease: lease, TokenType: tokenType, + Global: global, }) if err != nil { return nil, err @@ -160,4 +168,5 @@ type roleConfig struct { Policy []string `json:"policy"` Lease time.Duration `json:"lease"` TokenType string `json:"token_type"` + Global bool `json:"global"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index f837df412..1a1c66fde 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -63,6 +63,7 @@ func (b *backend) pathTokenRead( Name: tokenName, Type: result.TokenType, Policies: result.Policy, + Global: result.Global, }, nil) if err != nil { return logical.ErrorResponse(err.Error()), nil From ad5f1018dde57febf4ace5a71ca4b8c9ca6f5660 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 28 Sep 2017 23:58:41 +0100 Subject: [PATCH 07/52] Various fixes (Null pointer, wait for Nomad go up, Auth before policy creation) --- builtin/logical/nomad/backend_test.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index a234b3b39..7c7a7393b 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -30,8 +30,9 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma dockerOptions := &dockertest.RunOptions{ Repository: "djenriquez/nomad", - Tag: "v0.7.0-beta1", + Tag: "latest", Cmd: []string{"agent", "-dev"}, + Env: []string{`NOMAD_LOCAL_CONFIG=bind_addr = "0.0.0.0" acl { enabled = true }`}, } resource, err := pool.RunWithOptions(dockerOptions) if err != nil { @@ -46,7 +47,9 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma } retAddress = fmt.Sprintf("http://localhost:%s/", resource.GetPort("4646/tcp")) + // Give Nomad time to initialize + time.Sleep(5000 * time.Millisecond) // exponential backoff-retry if err = pool.Retry(func() error { var err error @@ -57,7 +60,11 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma return err } aclbootstrap, _, err := nomad.ACLTokens().Bootstrap(nil) + if err != nil { + t.Fatalf("err: %v", err) + } nomadToken = aclbootstrap.SecretID + log.Printf("[WARN] Generated Master token: %s", nomadToken) policy := &nomadapi.ACLPolicy{ Name: "test", Description: "test", @@ -66,7 +73,11 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma } `, } - _, err = nomad.ACLPolicies().Upsert(policy, nil) + nomadAuthConfig := 
nomadapi.DefaultConfig() + nomadAuthConfig.Address = retAddress + nomadAuthConfig.SecretID = nomadToken + nomadAuth, err := nomadapi.NewClient(nomadAuthConfig) + _, err = nomadAuth.ACLPolicies().Upsert(policy, nil) if err != nil { t.Fatal(err) } @@ -179,7 +190,7 @@ func TestBackend_renew_revoke(t *testing.T) { generatedSecret.TTL = 6 * time.Hour var d struct { - Token string `mapstructure:"SecretID"` + Token string `mapstructure:"secret_id"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) @@ -196,7 +207,7 @@ func TestBackend_renew_revoke(t *testing.T) { } log.Printf("[WARN] Verifying that the generated token works...") - _, err = client.Status().Leader, nil + _, err = client.Jobs().List, nil if err != nil { t.Fatal(err) } @@ -218,7 +229,7 @@ func TestBackend_renew_revoke(t *testing.T) { } log.Printf("[WARN] Verifying that the generated token does not work...") - _, err = client.Status().Leader, nil + _, err = client.Jobs().List, nil if err == nil { t.Fatal("expected error") } From 639002141326c4985474df1ee1556f81f5759b03 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 09:33:58 +0100 Subject: [PATCH 08/52] Working tests --- builtin/logical/nomad/backend_test.go | 48 ++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 7c7a7393b..e1b0956b5 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -72,6 +72,20 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma policy = "read" } `, + } + anonPolicy := &nomadapi.ACLPolicy{ + Name: "anonymous", + Description: "Deny all access for anonymous requests", + Rules: `namespace "default" { + policy = "deny" + } + agent { + policy = "deny" + } + node { + policy = "deny" + } + `, } nomadAuthConfig := nomadapi.DefaultConfig() nomadAuthConfig.Address = retAddress @@ -81,6 +95,10 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma if err != nil { t.Fatal(err) } + _, err = nomadAuth.ACLPolicies().Upsert(anonPolicy, nil) + if err != nil { + t.Fatal(err) + } return err }); err != nil { cleanup() @@ -143,9 +161,10 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - cleanup, connURL, connToken := prepareTestContainer(t) - defer cleanup() - + //cleanup, connURL, connToken := prepareTestContainer(t) + //defer cleanup() + //Ignore cleanup until I can find why the bloody test is not working + _, connURL, connToken := prepareTestContainer(t) connData := map[string]interface{}{ "address": connURL, "token": connToken, @@ -190,12 +209,13 @@ func TestBackend_renew_revoke(t *testing.T) { generatedSecret.TTL = 6 * time.Hour var d struct { - Token string `mapstructure:"secret_id"` + Token string `mapstructure:"secret_id"` + Accessor string `mapstructure:"accessor_id"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) } - log.Printf("[WARN] Generated token: %s", d.Token) + log.Printf("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) // Build a client and verify that the credentials work nomadapiConfig := nomadapi.DefaultConfig() @@ -207,7 +227,7 @@ func TestBackend_renew_revoke(t *testing.T) { } log.Printf("[WARN] Verifying that the generated token works...") - _, err = client.Jobs().List, nil + _, err = client.Agent().Members, nil if err != nil { t.Fatal(err) } @@ -228,9 +248,19 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - 
log.Printf("[WARN] Verifying that the generated token does not work...") - _, err = client.Jobs().List, nil + // Build a management client and verify that the token does not exist anymore + nomadmgmtConfig := nomadapi.DefaultConfig() + nomadmgmtConfig.Address = connData["address"].(string) + nomadmgmtConfig.SecretID = connData["token"].(string) + mgmtclient, err := nomadapi.NewClient(nomadmgmtConfig) + + q := &nomadapi.QueryOptions{ + Namespace: "default", + } + + log.Printf("[WARN] Verifying that the generated token does not exist...") + _, _, err = mgmtclient.ACLTokens().Info(d.Accessor, q) if err == nil { - t.Fatal("expected error") + t.Fatal("err: expected error") } } From 40839d2163f8db8eb4a8c5351b502cdadb0d7c73 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 09:35:17 +0100 Subject: [PATCH 09/52] Removing ignore to cleanup function --- builtin/logical/nomad/backend_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index e1b0956b5..b64dc1bea 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -161,10 +161,8 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - //cleanup, connURL, connToken := prepareTestContainer(t) - //defer cleanup() - //Ignore cleanup until I can find why the bloody test is not working - _, connURL, connToken := prepareTestContainer(t) + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() connData := map[string]interface{}{ "address": connURL, "token": connToken, From b207b76f1431eaf9f654d800da28d3bc1c12000f Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 11:23:47 +0100 Subject: [PATCH 10/52] Updated API Docs with the Global Token Parameter --- website/source/api/secret/nomad/index.html.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 236e3e7e8..9cdf59b97 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -70,7 +70,10 @@ updated attributes. as a string duration with a time suffix like `"30s"` or `"1h"`. If not provided, the default Vault lease is used. -- `policy` `(string: )` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. +- `policy` `(string: "")` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. + +- `global` `(bool: "")` – Specifies if the token should be global, as defined in the [Nomad Documentation](https://www.nomadproject.io/guides/acl.html#acl-tokens). +ma - `token_type` `(string: "client")` - Specifies the type of token to create when using this role. Valid values are `"client"` or `"management"`. 
From d7f606d497cddc2fc72a4694309fd62855b2ddf8 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 14:39:24 +0100 Subject: [PATCH 11/52] Adding vendor dependency --- vendor/vendor.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/vendor/vendor.json b/vendor/vendor.json index 541d6c03e..2ccc97e6d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1140,6 +1140,13 @@ "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", "revisionTime": "2017-09-14T15:46:24Z" }, + { + "checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=", + "path": "github.com/hashicorp/nomad/api", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": "2017-09-20T19:48:06Z", + "version": "v0.7.0-beta1" + }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", "path": "github.com/hashicorp/serf/coordinate", From ade157a0412fa693fddf4e0b3846221afa2a97ef Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 14:39:59 +0100 Subject: [PATCH 12/52] Adding vendor dir --- vendor/github.com/hashicorp/nomad/LICENSE | 363 +++++++ vendor/github.com/hashicorp/nomad/api/acl.go | 186 ++++ .../github.com/hashicorp/nomad/api/agent.go | 267 +++++ .../hashicorp/nomad/api/allocations.go | 157 +++ vendor/github.com/hashicorp/nomad/api/api.go | 777 ++++++++++++++ .../hashicorp/nomad/api/constraint.go | 17 + .../hashicorp/nomad/api/deployments.go | 234 +++++ .../hashicorp/nomad/api/evaluations.go | 97 ++ vendor/github.com/hashicorp/nomad/api/fs.go | 398 +++++++ vendor/github.com/hashicorp/nomad/api/jobs.go | 968 ++++++++++++++++++ .../hashicorp/nomad/api/jobs_testing.go | 110 ++ .../hashicorp/nomad/api/namespace.go | 90 ++ .../github.com/hashicorp/nomad/api/nodes.go | 199 ++++ .../hashicorp/nomad/api/operator.go | 87 ++ vendor/github.com/hashicorp/nomad/api/raw.go | 38 + .../github.com/hashicorp/nomad/api/regions.go | 23 + .../hashicorp/nomad/api/resources.go | 81 ++ .../github.com/hashicorp/nomad/api/search.go | 39 + .../hashicorp/nomad/api/sentinel.go | 79 ++ .../github.com/hashicorp/nomad/api/status.go | 43 + .../github.com/hashicorp/nomad/api/system.go | 23 + .../github.com/hashicorp/nomad/api/tasks.go | 617 +++++++++++ 22 files changed, 4893 insertions(+) create mode 100644 vendor/github.com/hashicorp/nomad/LICENSE create mode 100644 vendor/github.com/hashicorp/nomad/api/acl.go create mode 100644 vendor/github.com/hashicorp/nomad/api/agent.go create mode 100644 vendor/github.com/hashicorp/nomad/api/allocations.go create mode 100644 vendor/github.com/hashicorp/nomad/api/api.go create mode 100644 vendor/github.com/hashicorp/nomad/api/constraint.go create mode 100644 vendor/github.com/hashicorp/nomad/api/deployments.go create mode 100644 vendor/github.com/hashicorp/nomad/api/evaluations.go create mode 100644 vendor/github.com/hashicorp/nomad/api/fs.go create mode 100644 vendor/github.com/hashicorp/nomad/api/jobs.go create mode 100644 vendor/github.com/hashicorp/nomad/api/jobs_testing.go create mode 100644 vendor/github.com/hashicorp/nomad/api/namespace.go create mode 100644 vendor/github.com/hashicorp/nomad/api/nodes.go create mode 100644 vendor/github.com/hashicorp/nomad/api/operator.go create mode 100644 vendor/github.com/hashicorp/nomad/api/raw.go create mode 100644 vendor/github.com/hashicorp/nomad/api/regions.go create mode 100644 vendor/github.com/hashicorp/nomad/api/resources.go create mode 100644 vendor/github.com/hashicorp/nomad/api/search.go create mode 100644 vendor/github.com/hashicorp/nomad/api/sentinel.go create mode 100644 
vendor/github.com/hashicorp/nomad/api/status.go create mode 100644 vendor/github.com/hashicorp/nomad/api/system.go create mode 100644 vendor/github.com/hashicorp/nomad/api/tasks.go diff --git a/vendor/github.com/hashicorp/nomad/LICENSE b/vendor/github.com/hashicorp/nomad/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/nomad/api/acl.go b/vendor/github.com/hashicorp/nomad/api/acl.go new file mode 100644 index 000000000..bac698237 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/acl.go @@ -0,0 +1,186 @@ +package api + +import ( + "fmt" + "time" +) + +// ACLPolicies is used to query the ACL Policy endpoints. +type ACLPolicies struct { + client *Client +} + +// ACLPolicies returns a new handle on the ACL policies. +func (c *Client) ACLPolicies() *ACLPolicies { + return &ACLPolicies{client: c} +} + +// List is used to dump all of the policies. 
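+//
+// A hypothetical usage sketch (an editor's illustration, not part of the
+// vendored upstream source; it assumes a configured *Client named c):
+//
+//	policies, _, err := c.ACLPolicies().List(nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, p := range policies {
+//		fmt.Println(p.Name)
+//	}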
+func (a *ACLPolicies) List(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) { + var resp []*ACLPolicyListStub + qm, err := a.client.query("/v1/acl/policies", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Upsert is used to create or update a policy +func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, error) { + if policy == nil || policy.Name == "" { + return nil, fmt.Errorf("missing policy name") + } + wm, err := a.client.write("/v1/acl/policy/"+policy.Name, policy, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Delete is used to delete a policy +func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { + if policyName == "" { + return nil, fmt.Errorf("missing policy name") + } + wm, err := a.client.delete("/v1/acl/policy/"+policyName, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Info is used to query a specific policy +func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + if policyName == "" { + return nil, nil, fmt.Errorf("missing policy name") + } + var resp ACLPolicy + wm, err := a.client.query("/v1/acl/policy/"+policyName, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// ACLTokens is used to query the ACL token endpoints. +type ACLTokens struct { + client *Client +} + +// ACLTokens returns a new handle on the ACL tokens. +func (c *Client) ACLTokens() *ACLTokens { + return &ACLTokens{client: c} +} + +// Bootstrap is used to get the initial bootstrap token +func (a *ACLTokens) Bootstrap(q *WriteOptions) (*ACLToken, *WriteMeta, error) { + var resp ACLToken + wm, err := a.client.write("/v1/acl/bootstrap", nil, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// List is used to dump all of the tokens. 
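+//
+// A hedged usage sketch (illustration only; assumes a configured *Client
+// named c):
+//
+//	tokens, _, err := c.ACLTokens().List(nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, t := range tokens {
+//		fmt.Println(t.AccessorID, t.Name)
+//	}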
+func (a *ACLTokens) List(q *QueryOptions) ([]*ACLTokenListStub, *QueryMeta, error) { + var resp []*ACLTokenListStub + qm, err := a.client.query("/v1/acl/tokens", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Create is used to create a token +func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID != "" { + return nil, nil, fmt.Errorf("cannot specify Accessor ID") + } + var resp ACLToken + wm, err := a.client.write("/v1/acl/token", token, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Update is used to update an existing token +func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID == "" { + return nil, nil, fmt.Errorf("missing accessor ID") + } + var resp ACLToken + wm, err := a.client.write("/v1/acl/token/"+token.AccessorID, + token, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Delete is used to delete a token +func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, error) { + if accessorID == "" { + return nil, fmt.Errorf("missing accessor ID") + } + wm, err := a.client.delete("/v1/acl/token/"+accessorID, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Info is used to query a token +func (a *ACLTokens) Info(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) { + if accessorID == "" { + return nil, nil, fmt.Errorf("missing accessor ID") + } + var resp ACLToken + wm, err := a.client.query("/v1/acl/token/"+accessorID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// ACLPolicyListStub is used to for listing ACL policies +type ACLPolicyListStub struct { + Name string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ACLPolicy is used to represent an ACL policy +type ACLPolicy struct { + Name string + Description string + Rules string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ACLToken represents a client token which is used to Authenticate +type ACLToken struct { + AccessorID string + SecretID string + Name string + Type string + Policies []string + Global bool + CreateTime time.Time + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLTokenListStub struct { + AccessorID string + Name string + Type string + Policies []string + Global bool + CreateTime time.Time + CreateIndex uint64 + ModifyIndex uint64 +} diff --git a/vendor/github.com/hashicorp/nomad/api/agent.go b/vendor/github.com/hashicorp/nomad/api/agent.go new file mode 100644 index 000000000..e8b063ff1 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/agent.go @@ -0,0 +1,267 @@ +package api + +import ( + "fmt" + "net/url" +) + +// Agent encapsulates an API client which talks to Nomad's +// agent endpoints for a specific node. +type Agent struct { + client *Client + + // Cache static agent info + nodeName string + datacenter string + region string +} + +// KeyringResponse is a unified key response and can be used for install, +// remove, use, as well as listing key queries. +type KeyringResponse struct { + Messages map[string]string + Keys map[string]int + NumNodes int +} + +// KeyringRequest is request objects for serf key operations. +type KeyringRequest struct { + Key string +} + +// Agent returns a new agent which can be used to query +// the agent-specific endpoints. 
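+//
+// A minimal sketch of how the handle might be used (illustrative only;
+// assumes a configured *Client named c):
+//
+//	agent := c.Agent()
+//	name, err := agent.NodeName() // cached after the first Self() call
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(name)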
+func (c *Client) Agent() *Agent { + return &Agent{client: c} +} + +// Self is used to query the /v1/agent/self endpoint and +// returns information specific to the running agent. +func (a *Agent) Self() (*AgentSelf, error) { + var out *AgentSelf + + // Query the self endpoint on the agent + _, err := a.client.query("/v1/agent/self", &out, nil) + if err != nil { + return nil, fmt.Errorf("failed querying self endpoint: %s", err) + } + + // Populate the cache for faster queries + a.populateCache(out) + + return out, nil +} + +// populateCache is used to insert various pieces of static +// data into the agent handle. This is used during subsequent +// lookups for the same data later on to save the round trip. +func (a *Agent) populateCache(self *AgentSelf) { + if a.nodeName == "" { + a.nodeName = self.Member.Name + } + if a.datacenter == "" { + if val, ok := self.Config["Datacenter"]; ok { + a.datacenter, _ = val.(string) + } + } + if a.region == "" { + if val, ok := self.Config["Region"]; ok { + a.region, _ = val.(string) + } + } +} + +// NodeName is used to query the Nomad agent for its node name. +func (a *Agent) NodeName() (string, error) { + // Return from cache if we have it + if a.nodeName != "" { + return a.nodeName, nil + } + + // Query the node name + _, err := a.Self() + return a.nodeName, err +} + +// Datacenter is used to return the name of the datacenter which +// the agent is a member of. +func (a *Agent) Datacenter() (string, error) { + // Return from cache if we have it + if a.datacenter != "" { + return a.datacenter, nil + } + + // Query the agent for the DC + _, err := a.Self() + return a.datacenter, err +} + +// Region is used to look up the region the agent is in. +func (a *Agent) Region() (string, error) { + // Return from cache if we have it + if a.region != "" { + return a.region, nil + } + + // Query the agent for the region + _, err := a.Self() + return a.region, err +} + +// Join is used to instruct a server node to join another server +// via the gossip protocol. Multiple addresses may be specified. +// We attempt to join all of the hosts in the list. Returns the +// number of nodes successfully joined and any error. If one or +// more nodes have a successful result, no error is returned. +func (a *Agent) Join(addrs ...string) (int, error) { + // Accumulate the addresses + v := url.Values{} + for _, addr := range addrs { + v.Add("address", addr) + } + + // Send the join request + var resp joinResponse + _, err := a.client.write("/v1/agent/join?"+v.Encode(), nil, &resp, nil) + if err != nil { + return 0, fmt.Errorf("failed joining: %s", err) + } + if resp.Error != "" { + return 0, fmt.Errorf("failed joining: %s", resp.Error) + } + return resp.NumJoined, nil +} + +// Members is used to query all of the known server members +func (a *Agent) Members() (*ServerMembers, error) { + var resp *ServerMembers + + // Query the known members + _, err := a.client.query("/v1/agent/members", &resp, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// ForceLeave is used to eject an existing node from the cluster. +func (a *Agent) ForceLeave(node string) error { + _, err := a.client.write("/v1/agent/force-leave?node="+node, nil, nil, nil) + return err +} + +// Servers is used to query the list of servers on a client node. 
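+//
+// An illustrative sketch (not part of upstream; assumes a *Client c pointing
+// at a client-mode agent):
+//
+//	servers, err := c.Agent().Servers()
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(servers) // e.g. a list of "host:port" strings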
+func (a *Agent) Servers() ([]string, error) { + var resp []string + _, err := a.client.query("/v1/agent/servers", &resp, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetServers is used to update the list of servers on a client node. +func (a *Agent) SetServers(addrs []string) error { + // Accumulate the addresses + v := url.Values{} + for _, addr := range addrs { + v.Add("address", addr) + } + + _, err := a.client.write("/v1/agent/servers?"+v.Encode(), nil, nil, nil) + return err +} + +// ListKeys returns the list of installed keys +func (a *Agent) ListKeys() (*KeyringResponse, error) { + var resp KeyringResponse + _, err := a.client.query("/v1/agent/keyring/list", &resp, nil) + if err != nil { + return nil, err + } + return &resp, nil +} + +// InstallKey installs a key in the keyrings of all the serf members +func (a *Agent) InstallKey(key string) (*KeyringResponse, error) { + args := KeyringRequest{ + Key: key, + } + var resp KeyringResponse + _, err := a.client.write("/v1/agent/keyring/install", &args, &resp, nil) + return &resp, err +} + +// UseKey uses a key from the keyring of serf members +func (a *Agent) UseKey(key string) (*KeyringResponse, error) { + args := KeyringRequest{ + Key: key, + } + var resp KeyringResponse + _, err := a.client.write("/v1/agent/keyring/use", &args, &resp, nil) + return &resp, err +} + +// RemoveKey removes a particular key from keyrings of serf members +func (a *Agent) RemoveKey(key string) (*KeyringResponse, error) { + args := KeyringRequest{ + Key: key, + } + var resp KeyringResponse + _, err := a.client.write("/v1/agent/keyring/remove", &args, &resp, nil) + return &resp, err +} + +// joinResponse is used to decode the response we get while +// sending a member join request. +type joinResponse struct { + NumJoined int `json:"num_joined"` + Error string `json:"error"` +} + +type ServerMembers struct { + ServerName string + ServerRegion string + ServerDC string + Members []*AgentMember +} + +type AgentSelf struct { + Config map[string]interface{} `json:"config"` + Member AgentMember `json:"member"` + Stats map[string]map[string]string `json:"stats"` +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status string + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentMembersNameSort implements sort.Interface for []*AgentMembersNameSort +// based on the Name, DC and Region +type AgentMembersNameSort []*AgentMember + +func (a AgentMembersNameSort) Len() int { return len(a) } +func (a AgentMembersNameSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a AgentMembersNameSort) Less(i, j int) bool { + if a[i].Tags["region"] != a[j].Tags["region"] { + return a[i].Tags["region"] < a[j].Tags["region"] + } + + if a[i].Tags["dc"] != a[j].Tags["dc"] { + return a[i].Tags["dc"] < a[j].Tags["dc"] + } + + return a[i].Name < a[j].Name + +} diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go new file mode 100644 index 000000000..74aaaf3fd --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/allocations.go @@ -0,0 +1,157 @@ +package api + +import ( + "fmt" + "sort" + "time" +) + +var ( + // NodeDownErr marks an operation as not able to complete since the node is + // down. + NodeDownErr = fmt.Errorf("node down") +) + +// Allocations is used to query the alloc-related endpoints. 
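+//
+// A hedged usage sketch (illustration only; c is an assumed *Client):
+//
+//	allocs, _, err := c.Allocations().List(nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, a := range allocs {
+//		fmt.Println(a.ID, a.ClientStatus)
+//	}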
+type Allocations struct {
+	client *Client
+}
+
+// Allocations returns a handle on the allocs endpoints.
+func (c *Client) Allocations() *Allocations {
+	return &Allocations{client: c}
+}
+
+// List returns a list of all of the allocations.
+func (a *Allocations) List(q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
+	var resp []*AllocationListStub
+	qm, err := a.client.query("/v1/allocations", &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	sort.Sort(AllocIndexSort(resp))
+	return resp, qm, nil
+}
+
+func (a *Allocations) PrefixList(prefix string) ([]*AllocationListStub, *QueryMeta, error) {
+	return a.List(&QueryOptions{Prefix: prefix})
+}
+
+// Info is used to retrieve a single allocation.
+func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *QueryMeta, error) {
+	var resp Allocation
+	qm, err := a.client.query("/v1/allocation/"+allocID, &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, qm, nil
+}
+
+func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) {
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		return nil, err
+	}
+
+	var resp AllocResourceUsage
+	_, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/stats", &resp, nil)
+	return &resp, err
+}
+
+func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error {
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		return err
+	}
+
+	var resp struct{}
+	_, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil)
+	return err
+}
+
+// Allocation is used for serialization of allocations.
+type Allocation struct {
+	ID                 string
+	Namespace          string
+	EvalID             string
+	Name               string
+	NodeID             string
+	JobID              string
+	Job                *Job
+	TaskGroup          string
+	Resources          *Resources
+	TaskResources      map[string]*Resources
+	Services           map[string]string
+	Metrics            *AllocationMetric
+	DesiredStatus      string
+	DesiredDescription string
+	ClientStatus       string
+	ClientDescription  string
+	TaskStates         map[string]*TaskState
+	DeploymentID       string
+	DeploymentStatus   *AllocDeploymentStatus
+	PreviousAllocation string
+	CreateIndex        uint64
+	ModifyIndex        uint64
+	AllocModifyIndex   uint64
+	CreateTime         int64
+}
+
+// AllocationMetric is used to deserialize allocation metrics.
+type AllocationMetric struct {
+	NodesEvaluated     int
+	NodesFiltered      int
+	NodesAvailable     map[string]int
+	ClassFiltered      map[string]int
+	ConstraintFiltered map[string]int
+	NodesExhausted     int
+	ClassExhausted     map[string]int
+	DimensionExhausted map[string]int
+	Scores             map[string]float64
+	AllocationTime     time.Duration
+	CoalescedFailures  int
+}
+
+// AllocationListStub is used to return a subset of an allocation
+// during list operations.
+type AllocationListStub struct {
+	ID                 string
+	EvalID             string
+	Name               string
+	NodeID             string
+	JobID              string
+	JobVersion         uint64
+	TaskGroup          string
+	DesiredStatus      string
+	DesiredDescription string
+	ClientStatus       string
+	ClientDescription  string
+	TaskStates         map[string]*TaskState
+	DeploymentStatus   *AllocDeploymentStatus
+	CreateIndex        uint64
+	ModifyIndex        uint64
+	CreateTime         int64
+}
+
+// AllocDeploymentStatus captures the status of the allocation as part of the
+// deployment. This can include things like whether the allocation has been
+// marked as healthy.
+type AllocDeploymentStatus struct {
+	Healthy     *bool
+	ModifyIndex uint64
+}
+
+// AllocIndexSort reverse sorts allocs by CreateIndex.
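+//
+// A one-line sketch of sorting newest-first (illustrative only; stubs is an
+// assumed []*AllocationListStub):
+//
+//	sort.Sort(AllocIndexSort(stubs))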
+type AllocIndexSort []*AllocationListStub + +func (a AllocIndexSort) Len() int { + return len(a) +} + +func (a AllocIndexSort) Less(i, j int) bool { + return a[i].CreateIndex > a[j].CreateIndex +} + +func (a AllocIndexSort) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} diff --git a/vendor/github.com/hashicorp/nomad/api/api.go b/vendor/github.com/hashicorp/nomad/api/api.go new file mode 100644 index 000000000..a3d476d64 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/api.go @@ -0,0 +1,777 @@ +package api + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + rootcerts "github.com/hashicorp/go-rootcerts" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the region provided + // by the Config + Region string + + // Namespace is the target namespace for the query. + Namespace string + + // AllowStale allows any Nomad server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // If set, used as prefix for resource list searches + Prefix string + + // Set HTTP parameters on the query. + Params map[string]string + + // SecretID is the secret ID of an ACL token + SecretID string +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the region provided + // by the Config + Region string + + // Namespace is the target namespace for the write. + Namespace string + + // SecretID is the secret ID of an ACL token + SecretID string +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Nomad agent + Address string + + // Region to use. If not provided, the default agent region is used. + Region string + + // SecretID to use. This can be overwritten per request. + SecretID string + + // Namespace to use. If not provided the default namespace is used. + Namespace string + + // httpClient is the client to use. Default will be used if not provided. + httpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. 
If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// TLSConfig provides the various TLS related configurations for the HTTP
+	// client.
+	TLSConfig *TLSConfig
+}
+
+// ClientConfig copies the configuration with a new client address, region, and
+// whether the client has TLS enabled.
+func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
+	scheme := "http"
+	if tlsEnabled {
+		scheme = "https"
+	}
+	defaultConfig := DefaultConfig()
+	config := &Config{
+		Address:    fmt.Sprintf("%s://%s", scheme, address),
+		Region:     region,
+		Namespace:  c.Namespace,
+		httpClient: defaultConfig.httpClient,
+		SecretID:   c.SecretID,
+		HttpAuth:   c.HttpAuth,
+		WaitTime:   c.WaitTime,
+		TLSConfig:  c.TLSConfig.Copy(),
+	}
+	if tlsEnabled && config.TLSConfig != nil {
+		config.TLSConfig.TLSServerName = fmt.Sprintf("client.%s.nomad", region)
+	}
+
+	return config
+}
+
+// TLSConfig contains the parameters needed to configure TLS on the HTTP client
+// used to communicate with Nomad.
+type TLSConfig struct {
+	// CACert is the path to a PEM-encoded CA cert file to use to verify the
+	// Nomad server SSL certificate.
+	CACert string
+
+	// CAPath is the path to a directory of PEM-encoded CA cert files to verify
+	// the Nomad server SSL certificate.
+	CAPath string
+
+	// ClientCert is the path to the certificate for Nomad communication
+	ClientCert string
+
+	// ClientKey is the path to the private key for Nomad communication
+	ClientKey string
+
+	// TLSServerName, if set, is used to set the SNI host when connecting via
+	// TLS.
+	TLSServerName string
+
+	// Insecure enables or disables SSL verification
+	Insecure bool
+}
+
+func (t *TLSConfig) Copy() *TLSConfig {
+	if t == nil {
+		return nil
+	}
+
+	nt := new(TLSConfig)
+	*nt = *t
+	return nt
+}
+
+// DefaultConfig returns a default configuration for the client
+func DefaultConfig() *Config {
+	config := &Config{
+		Address:    "http://127.0.0.1:4646",
+		httpClient: cleanhttp.DefaultClient(),
+		TLSConfig:  &TLSConfig{},
+	}
+	transport := config.httpClient.Transport.(*http.Transport)
+	transport.TLSHandshakeTimeout = 10 * time.Second
+	transport.TLSClientConfig = &tls.Config{
+		MinVersion: tls.VersionTLS12,
+	}
+
+	if addr := os.Getenv("NOMAD_ADDR"); addr != "" {
+		config.Address = addr
+	}
+	if v := os.Getenv("NOMAD_REGION"); v != "" {
+		config.Region = v
+	}
+	if v := os.Getenv("NOMAD_NAMESPACE"); v != "" {
+		config.Namespace = v
+	}
+	if auth := os.Getenv("NOMAD_HTTP_AUTH"); auth != "" {
+		var username, password string
+		if strings.Contains(auth, ":") {
+			split := strings.SplitN(auth, ":", 2)
+			username = split[0]
+			password = split[1]
+		} else {
+			username = auth
+		}
+
+		config.HttpAuth = &HttpBasicAuth{
+			Username: username,
+			Password: password,
+		}
+	}
+
+	// Read TLS specific env vars
+	if v := os.Getenv("NOMAD_CACERT"); v != "" {
+		config.TLSConfig.CACert = v
+	}
+	if v := os.Getenv("NOMAD_CAPATH"); v != "" {
+		config.TLSConfig.CAPath = v
+	}
+	if v := os.Getenv("NOMAD_CLIENT_CERT"); v != "" {
+		config.TLSConfig.ClientCert = v
+	}
+	if v := os.Getenv("NOMAD_CLIENT_KEY"); v != "" {
+		config.TLSConfig.ClientKey = v
+	}
+	if v := os.Getenv("NOMAD_SKIP_VERIFY"); v != "" {
+		if insecure, err := strconv.ParseBool(v); err == nil {
+			config.TLSConfig.Insecure = insecure
+		}
+	}
+	if v := os.Getenv("NOMAD_TOKEN"); v != "" {
+		config.SecretID = v
+	}
+	return config
+}
+
+// ConfigureTLS applies a set of TLS configurations to the HTTP client.
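+//
+// A hypothetical caller-side sketch (illustration only; the certificate path
+// is a made-up example). Note that NewClient invokes ConfigureTLS itself:
+//
+//	cfg := DefaultConfig()
+//	cfg.TLSConfig.CACert = "/etc/nomad.d/ca.pem"
+//	client, err := NewClient(cfg)
+//	if err != nil {
+//		// handle the error
+//	}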
+func (c *Config) ConfigureTLS() error {
+	if c.TLSConfig == nil {
+		return nil
+	}
+	if c.httpClient == nil {
+		return fmt.Errorf("config HTTP Client must be set")
+	}
+
+	var clientCert tls.Certificate
+	foundClientCert := false
+	if c.TLSConfig.ClientCert != "" || c.TLSConfig.ClientKey != "" {
+		if c.TLSConfig.ClientCert != "" && c.TLSConfig.ClientKey != "" {
+			var err error
+			clientCert, err = tls.LoadX509KeyPair(c.TLSConfig.ClientCert, c.TLSConfig.ClientKey)
+			if err != nil {
+				return err
+			}
+			foundClientCert = true
+		} else {
+			return fmt.Errorf("Both client cert and client key must be provided")
+		}
+	}
+
+	clientTLSConfig := c.httpClient.Transport.(*http.Transport).TLSClientConfig
+	rootConfig := &rootcerts.Config{
+		CAFile: c.TLSConfig.CACert,
+		CAPath: c.TLSConfig.CAPath,
+	}
+	if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
+		return err
+	}
+
+	clientTLSConfig.InsecureSkipVerify = c.TLSConfig.Insecure
+
+	if foundClientCert {
+		clientTLSConfig.Certificates = []tls.Certificate{clientCert}
+	}
+	if c.TLSConfig.TLSServerName != "" {
+		clientTLSConfig.ServerName = c.TLSConfig.TLSServerName
+	}
+
+	return nil
+}
+
+// Client provides a client to the Nomad API
+type Client struct {
+	config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+	// bootstrap the config
+	defConfig := DefaultConfig()
+
+	if config.Address == "" {
+		config.Address = defConfig.Address
+	} else if _, err := url.Parse(config.Address); err != nil {
+		return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
+	}
+
+	if config.httpClient == nil {
+		config.httpClient = defConfig.httpClient
+	}
+
+	// Configure the TLS configurations
+	if err := config.ConfigureTLS(); err != nil {
+		return nil, err
+	}
+
+	client := &Client{
+		config: *config,
+	}
+	return client, nil
+}
+
+// SetRegion sets the region to forward API requests to.
+func (c *Client) SetRegion(region string) {
+	c.config.Region = region
+}
+
+// SetNamespace sets the namespace to forward API requests to.
+func (c *Client) SetNamespace(namespace string) {
+	c.config.Namespace = namespace
+}
+
+// GetNodeClient returns a new Client that will dial the specified node. If the
+// QueryOptions is set, its region will be used.
+func (c *Client) GetNodeClient(nodeID string, q *QueryOptions) (*Client, error) {
+	return c.getNodeClientImpl(nodeID, q, c.Nodes().Info)
+}
+
+// nodeLookup is the definition of a function used to lookup a node. This is
+// largely used to mock the lookup in tests.
+type nodeLookup func(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error)
+
+// getNodeClientImpl is the implementation of creating an API client for
+// contacting a node. It takes a function to lookup the node such that it can be
+// mocked during tests.
+func (c *Client) getNodeClientImpl(nodeID string, q *QueryOptions, lookup nodeLookup) (*Client, error) {
+	node, _, err := lookup(nodeID, q)
+	if err != nil {
+		return nil, err
+	}
+	if node.Status == "down" {
+		return nil, NodeDownErr
+	}
+	if node.HTTPAddr == "" {
+		return nil, fmt.Errorf("http addr of node %q (%s) is not advertised", node.Name, nodeID)
+	}
+
+	var region string
+	switch {
+	case q != nil && q.Region != "":
+		// Prefer the region set in the query parameter
+		region = q.Region
+	case c.config.Region != "":
+		// If the client is configured for a particular region use that
+		region = c.config.Region
+	default:
+		// No region information is given so use the default.
+ region = "global" + } + + // Get an API client for the node + conf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled) + return NewClient(conf) +} + +// SetSecretID sets the ACL token secret for API requests. +func (c *Client) SetSecretID(secretID string) { + c.config.SecretID = secretID +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + token string + body io.Reader + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Region != "" { + r.params.Set("region", q.Region) + } + if q.Namespace != "" { + r.params.Set("namespace", q.Namespace) + } + if q.SecretID != "" { + r.token = q.SecretID + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Prefix != "" { + r.params.Set("prefix", q.Prefix) + } + for k, v := range q.Params { + r.params.Set(k, v) + } +} + +// durToMsec converts a duration to a millisecond specified string +func durToMsec(dur time.Duration) string { + return fmt.Sprintf("%dms", dur/time.Millisecond) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Region != "" { + r.params.Set("region", q.Region) + } + if q.Namespace != "" { + r.params.Set("namespace", q.Namespace) + } + if q.SecretID != "" { + r.token = q.SecretID + } +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + if b, err := encodeBody(r.obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + // Optionally configure HTTP basic authentication + if r.url.User != nil { + username := r.url.User.Username() + password, _ := r.url.User.Password() + req.SetBasicAuth(username, password) + } else if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + + req.Header.Add("Accept-Encoding", "gzip") + if r.token != "" { + req.Header.Set("X-Nomad-Token", r.token) + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) (*request, error) { + base, _ := url.Parse(c.config.Address) + u, err := url.Parse(path) + if err != nil { + return nil, err + } + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: base.Scheme, + User: base.User, + Host: base.Host, + Path: u.Path, + }, + params: make(map[string][]string), + } + if c.config.Region != "" { + r.params.Set("region", c.config.Region) + } + if c.config.Namespace != "" { + r.params.Set("namespace", c.config.Namespace) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.SecretID != "" { + r.token = r.config.SecretID + } + + // Add in the query parameters, if any + for key, values := range u.Query() { + 
for _, value := range values { + r.params.Add(key, value) + } + } + + return r, nil +} + +// multiCloser is to wrap a ReadCloser such that when close is called, multiple +// Closes occur. +type multiCloser struct { + reader io.Reader + inorderClose []io.Closer +} + +func (m *multiCloser) Close() error { + for _, c := range m.inorderClose { + if err := c.Close(); err != nil { + return err + } + } + return nil +} + +func (m *multiCloser) Read(p []byte) (int, error) { + return m.reader.Read(p) +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.httpClient.Do(req) + diff := time.Now().Sub(start) + + // If the response is compressed, we swap the body's reader. + if resp != nil && resp.Header != nil { + var reader io.ReadCloser + switch resp.Header.Get("Content-Encoding") { + case "gzip": + greader, err := gzip.NewReader(resp.Body) + if err != nil { + return 0, nil, err + } + + // The gzip reader doesn't close the wrapped reader so we use + // multiCloser. + reader = &multiCloser{ + reader: greader, + inorderClose: []io.Closer{greader, resp.Body}, + } + default: + reader = resp.Body + } + resp.Body = reader + } + + return diff, resp, err +} + +// rawQuery makes a GET request to the specified endpoint but returns just the +// response body. +func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, error) { + r, err := c.newRequest("GET", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + _, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +// query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Nomad conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r, err := c.newRequest("GET", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// putQuery is used to do a PUT request when doing a read against an endpoint +// and deserialize the response into an interface using standard Nomad +// conventions. +func (c *Client) putQuery(endpoint string, in, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r, err := c.newRequest("PUT", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. 
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r, err := c.newRequest("PUT", endpoint) + if err != nil { + return nil, err + } + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + parseWriteMeta(resp, wm) + + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// delete is used to do a DELETE request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. +func (c *Client) delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r, err := c.newRequest("DELETE", endpoint) + if err != nil { + return nil, err + } + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + parseWriteMeta(resp, wm) + + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Nomad-Index + index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Nomad-LastContact + last, err := strconv.ParseUint(header.Get("X-Nomad-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Nomad-KnownLeader + switch header.Get("X-Nomad-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + return nil +} + +// parseWriteMeta is used to help parse write meta-data +func parseWriteMeta(resp *http.Response, q *WriteMeta) error { + header := resp.Header + + // Parse the X-Nomad-Index + index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err) + } + q.LastIndex = index + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/constraint.go b/vendor/github.com/hashicorp/nomad/api/constraint.go new file mode 100644 index 000000000..ec3a37a64 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/constraint.go @@ -0,0 +1,17 @@ +package api + +// Constraint is used to serialize a job placement constraint. 
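+//
+// A hedged example of building one (illustration only; the attribute name is
+// standard Nomad interpolation syntax, shown here as an assumption):
+//
+//	// Constrain placement to Linux nodes.
+//	linuxOnly := NewConstraint("${attr.kernel.name}", "=", "linux")
+//	_ = linuxOnly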
+type Constraint struct { + LTarget string + RTarget string + Operand string +} + +// NewConstraint generates a new job placement constraint. +func NewConstraint(left, operand, right string) *Constraint { + return &Constraint{ + LTarget: left, + RTarget: right, + Operand: operand, + } +} diff --git a/vendor/github.com/hashicorp/nomad/api/deployments.go b/vendor/github.com/hashicorp/nomad/api/deployments.go new file mode 100644 index 000000000..0b996f73c --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/deployments.go @@ -0,0 +1,234 @@ +package api + +import ( + "sort" +) + +// Deployments is used to query the deployments endpoints. +type Deployments struct { + client *Client +} + +// Deployments returns a new handle on the deployments. +func (c *Client) Deployments() *Deployments { + return &Deployments{client: c} +} + +// List is used to dump all of the deployments. +func (d *Deployments) List(q *QueryOptions) ([]*Deployment, *QueryMeta, error) { + var resp []*Deployment + qm, err := d.client.query("/v1/deployments", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(DeploymentIndexSort(resp)) + return resp, qm, nil +} + +func (d *Deployments) PrefixList(prefix string) ([]*Deployment, *QueryMeta, error) { + return d.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to query a single deployment by its ID. +func (d *Deployments) Info(deploymentID string, q *QueryOptions) (*Deployment, *QueryMeta, error) { + var resp Deployment + qm, err := d.client.query("/v1/deployment/"+deploymentID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Allocations is used to retrieve a set of allocations that are part of the +// deployment +func (d *Deployments) Allocations(deploymentID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + qm, err := d.client.query("/v1/deployment/allocations/"+deploymentID, &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +// Fail is used to fail the given deployment. +func (d *Deployments) Fail(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { + var resp DeploymentUpdateResponse + req := &DeploymentFailRequest{ + DeploymentID: deploymentID, + } + wm, err := d.client.write("/v1/deployment/fail/"+deploymentID, req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Pause is used to pause or unpause the given deployment. 
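+//
+// A hedged usage sketch (illustrative only; c and deploymentID are assumed to
+// exist):
+//
+//	resp, _, err := c.Deployments().Pause(deploymentID, true, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(resp.EvalID)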
+func (d *Deployments) Pause(deploymentID string, pause bool, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
+	var resp DeploymentUpdateResponse
+	req := &DeploymentPauseRequest{
+		DeploymentID: deploymentID,
+		Pause:        pause,
+	}
+	wm, err := d.client.write("/v1/deployment/pause/"+deploymentID, req, &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, wm, nil
+}
+
+// PromoteAll is used to promote all canaries in the given deployment
+func (d *Deployments) PromoteAll(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
+	var resp DeploymentUpdateResponse
+	req := &DeploymentPromoteRequest{
+		DeploymentID: deploymentID,
+		All:          true,
+	}
+	wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, wm, nil
+}
+
+// PromoteGroups is used to promote canaries in the passed groups in the given deployment
+func (d *Deployments) PromoteGroups(deploymentID string, groups []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
+	var resp DeploymentUpdateResponse
+	req := &DeploymentPromoteRequest{
+		DeploymentID: deploymentID,
+		Groups:       groups,
+	}
+	wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, wm, nil
+}
+
+// SetAllocHealth is used to set allocation health for allocs that are part of
+// the given deployment
+func (d *Deployments) SetAllocHealth(deploymentID string, healthy, unhealthy []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) {
+	var resp DeploymentUpdateResponse
+	req := &DeploymentAllocHealthRequest{
+		DeploymentID:           deploymentID,
+		HealthyAllocationIDs:   healthy,
+		UnhealthyAllocationIDs: unhealthy,
+	}
+	wm, err := d.client.write("/v1/deployment/allocation-health/"+deploymentID, req, &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, wm, nil
+}
+
+// Deployment is used to serialize a deployment.
+type Deployment struct {
+	ID                string
+	Namespace         string
+	JobID             string
+	JobVersion        uint64
+	JobModifyIndex    uint64
+	JobCreateIndex    uint64
+	TaskGroups        map[string]*DeploymentState
+	Status            string
+	StatusDescription string
+	CreateIndex       uint64
+	ModifyIndex       uint64
+}
+
+// DeploymentState tracks the state of a deployment for a given task group.
+type DeploymentState struct {
+	PlacedCanaries  []string
+	AutoRevert      bool
+	Promoted        bool
+	DesiredCanaries int
+	DesiredTotal    int
+	PlacedAllocs    int
+	HealthyAllocs   int
+	UnhealthyAllocs int
+}
+
+// DeploymentIndexSort is a wrapper to sort deployments by CreateIndex. We
+// reverse the test so that we get the highest index first.
+type DeploymentIndexSort []*Deployment
+
+func (d DeploymentIndexSort) Len() int {
+	return len(d)
+}
+
+func (d DeploymentIndexSort) Less(i, j int) bool {
+	return d[i].CreateIndex > d[j].CreateIndex
+}
+
+func (d DeploymentIndexSort) Swap(i, j int) {
+	d[i], d[j] = d[j], d[i]
+}
+
+// DeploymentUpdateResponse is used to respond to a deployment change. The
+// response will include the modify index of the deployment as well as details
+// of any triggered evaluation.
+type DeploymentUpdateResponse struct {
+	EvalID                string
+	EvalCreateIndex       uint64
+	DeploymentModifyIndex uint64
+	RevertedJobVersion    *uint64
+	WriteMeta
+}
+
+// DeploymentAllocHealthRequest is used to set the health of a set of
+// allocations as part of a deployment.
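+//
+// Typically driven through Deployments.SetAllocHealth; a hedged sketch
+// (illustration only; c, deploymentID, and allocID are assumed):
+//
+//	_, _, err := c.Deployments().SetAllocHealth(deploymentID,
+//		[]string{allocID}, nil, nil)
+//	if err != nil {
+//		// handle the error
+//	}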
+type DeploymentAllocHealthRequest struct { + DeploymentID string + + // Marks these allocations as healthy, allow further allocations + // to be rolled. + HealthyAllocationIDs []string + + // Any unhealthy allocations fail the deployment + UnhealthyAllocationIDs []string + + WriteRequest +} + +// DeploymentPromoteRequest is used to promote task groups in a deployment +type DeploymentPromoteRequest struct { + DeploymentID string + + // All is to promote all task groups + All bool + + // Groups is used to set the promotion status per task group + Groups []string + + WriteRequest +} + +// DeploymentPauseRequest is used to pause a deployment +type DeploymentPauseRequest struct { + DeploymentID string + + // Pause sets the pause status + Pause bool + + WriteRequest +} + +// DeploymentSpecificRequest is used to make a request specific to a particular +// deployment +type DeploymentSpecificRequest struct { + DeploymentID string + QueryOptions +} + +// DeploymentFailRequest is used to fail a particular deployment +type DeploymentFailRequest struct { + DeploymentID string + WriteRequest +} + +// SingleDeploymentResponse is used to respond with a single deployment +type SingleDeploymentResponse struct { + Deployment *Deployment + QueryMeta +} diff --git a/vendor/github.com/hashicorp/nomad/api/evaluations.go b/vendor/github.com/hashicorp/nomad/api/evaluations.go new file mode 100644 index 000000000..40aee6975 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/evaluations.go @@ -0,0 +1,97 @@ +package api + +import ( + "sort" + "time" +) + +// Evaluations is used to query the evaluation endpoints. +type Evaluations struct { + client *Client +} + +// Evaluations returns a new handle on the evaluations. +func (c *Client) Evaluations() *Evaluations { + return &Evaluations{client: c} +} + +// List is used to dump all of the evaluations. +func (e *Evaluations) List(q *QueryOptions) ([]*Evaluation, *QueryMeta, error) { + var resp []*Evaluation + qm, err := e.client.query("/v1/evaluations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(EvalIndexSort(resp)) + return resp, qm, nil +} + +func (e *Evaluations) PrefixList(prefix string) ([]*Evaluation, *QueryMeta, error) { + return e.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to query a single evaluation by its ID. +func (e *Evaluations) Info(evalID string, q *QueryOptions) (*Evaluation, *QueryMeta, error) { + var resp Evaluation + qm, err := e.client.query("/v1/evaluation/"+evalID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Allocations is used to retrieve a set of allocations given +// an evaluation ID. +func (e *Evaluations) Allocations(evalID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + qm, err := e.client.query("/v1/evaluation/"+evalID+"/allocations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +// Evaluation is used to serialize an evaluation. 
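+//
+// A hedged lookup sketch (illustration only; c and evalID are assumed):
+//
+//	eval, _, err := c.Evaluations().Info(evalID, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(eval.Status)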
+type Evaluation struct {
+	ID                   string
+	Priority             int
+	Type                 string
+	TriggeredBy          string
+	Namespace            string
+	JobID                string
+	JobModifyIndex       uint64
+	NodeID               string
+	NodeModifyIndex      uint64
+	DeploymentID         string
+	Status               string
+	StatusDescription    string
+	Wait                 time.Duration
+	NextEval             string
+	PreviousEval         string
+	BlockedEval          string
+	FailedTGAllocs       map[string]*AllocationMetric
+	ClassEligibility     map[string]bool
+	EscapedComputedClass bool
+	AnnotatePlan         bool
+	QueuedAllocations    map[string]int
+	SnapshotIndex        uint64
+	CreateIndex          uint64
+	ModifyIndex          uint64
+}
+
+// EvalIndexSort is a wrapper to sort evaluations by CreateIndex.
+// We reverse the test so that we get the highest index first.
+type EvalIndexSort []*Evaluation
+
+func (e EvalIndexSort) Len() int {
+	return len(e)
+}
+
+func (e EvalIndexSort) Less(i, j int) bool {
+	return e[i].CreateIndex > e[j].CreateIndex
+}
+
+func (e EvalIndexSort) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/fs.go b/vendor/github.com/hashicorp/nomad/api/fs.go
new file mode 100644
index 000000000..c412db541
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/fs.go
@@ -0,0 +1,398 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+	"time"
+)
+
+const (
+	// OriginStart and OriginEnd are the available parameters for the origin
+	// argument when streaming a file. They respectively offset from the start
+	// and end of a file.
+	OriginStart = "start"
+	OriginEnd   = "end"
+)
+
+// AllocFileInfo holds information about a file inside the AllocDir
+type AllocFileInfo struct {
+	Name     string
+	IsDir    bool
+	Size     int64
+	FileMode string
+	ModTime  time.Time
+}
+
+// StreamFrame is used to frame data of a file when streaming
+type StreamFrame struct {
+	Offset    int64  `json:",omitempty"`
+	Data      []byte `json:",omitempty"`
+	File      string `json:",omitempty"`
+	FileEvent string `json:",omitempty"`
+}
+
+// IsHeartbeat returns true if the frame is a heartbeat frame
+func (s *StreamFrame) IsHeartbeat() bool {
+	return len(s.Data) == 0 && s.FileEvent == "" && s.File == "" && s.Offset == 0
+}
+
+// AllocFS is used to introspect an allocation directory on a Nomad client
+type AllocFS struct {
+	client *Client
+}
+
+// AllocFS returns a handle to the AllocFS endpoints
+func (c *Client) AllocFS() *AllocFS {
+	return &AllocFS{client: c}
+}
+
+// List is used to list the files at a given path of an allocation directory
+func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*AllocFileInfo, *QueryMeta, error) {
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if q == nil {
+		q = &QueryOptions{}
+	}
+	if q.Params == nil {
+		q.Params = make(map[string]string)
+	}
+
+	q.Params["path"] = path
+
+	var resp []*AllocFileInfo
+	qm, err := nodeClient.query(fmt.Sprintf("/v1/client/fs/ls/%s", alloc.ID), &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return resp, qm, nil
+}
+
+// Stat is used to stat a file at a given path of an allocation directory
+func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocFileInfo, *QueryMeta, error) {
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if q == nil {
+		q = &QueryOptions{}
+	}
+	if q.Params == nil {
+		q.Params = make(map[string]string)
+	}
+
+	q.Params["path"] = path
+
+	var resp AllocFileInfo
+	qm, err := nodeClient.query(fmt.Sprintf("/v1/client/fs/stat/%s", alloc.ID), &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, qm, nil
+}
+
+// ReadAt is used to read bytes at a given offset until limit at the given path
+// in an allocation directory. If limit is <= 0, there is no limit.
+func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) {
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		return nil, err
+	}
+
+	if q == nil {
+		q = &QueryOptions{}
+	}
+	if q.Params == nil {
+		q.Params = make(map[string]string)
+	}
+
+	q.Params["path"] = path
+	q.Params["offset"] = strconv.FormatInt(offset, 10)
+	q.Params["limit"] = strconv.FormatInt(limit, 10)
+
+	r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID), q)
+	if err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// Cat is used to read the contents of a file at the given path in an
+// allocation directory
+func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) {
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		return nil, err
+	}
+
+	if q == nil {
+		q = &QueryOptions{}
+	}
+	if q.Params == nil {
+		q.Params = make(map[string]string)
+	}
+
+	q.Params["path"] = path
+
+	r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID), q)
+	if err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// Stream streams the content of a file blocking on EOF.
+// The parameters are:
+// * path: path to the file to stream.
+// * offset: The offset to start streaming data at.
+// * origin: Either "start" or "end" and defines from where the offset is applied.
+// * cancel: A channel that when closed, streaming will end.
+//
+// The return value is a channel that will emit StreamFrames as they are read.
+func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
+	cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
+
+	errCh := make(chan error, 1)
+	nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
+	if err != nil {
+		errCh <- err
+		return nil, errCh
+	}
+
+	if q == nil {
+		q = &QueryOptions{}
+	}
+	if q.Params == nil {
+		q.Params = make(map[string]string)
+	}
+
+	q.Params["path"] = path
+	q.Params["offset"] = strconv.FormatInt(offset, 10)
+	q.Params["origin"] = origin
+
+	r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID), q)
+	if err != nil {
+		errCh <- err
+		return nil, errCh
+	}
+
+	// Create the output channel
+	frames := make(chan *StreamFrame, 10)
+
+	go func() {
+		// Close the body
+		defer r.Close()
+
+		// Create a decoder
+		dec := json.NewDecoder(r)
+
+		for {
+			// Check if we have been cancelled
+			select {
+			case <-cancel:
+				return
+			default:
+			}
+
+			// Decode the next frame
+			var frame StreamFrame
+			if err := dec.Decode(&frame); err != nil {
+				errCh <- err
+				close(frames)
+				return
+			}
+
+			// Discard heartbeat frames
+			if frame.IsHeartbeat() {
+				continue
+			}
+
+			frames <- &frame
+		}
+	}()
+
+	return frames, errCh
+}
+
+// Logs streams the content of a task's logs blocking on EOF.
+// The parameters are:
+// * allocation: the allocation to stream from.
+// * follow: Whether the logs should be followed.
+// * task: the name of the task whose logs to stream.
+// * logType: Either "stdout" or "stderr"
+// * origin: Either "start" or "end" and defines from where the offset is applied.
+// * offset: The offset to start streaming data at.
+// * cancel: A channel that when closed, streaming will end.
+// +// The return value is a channel that will emit StreamFrames as they are read. +func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin string, + offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { + + errCh := make(chan error, 1) + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + errCh <- err + return nil, errCh + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["follow"] = strconv.FormatBool(follow) + q.Params["task"] = task + q.Params["type"] = logType + q.Params["origin"] = origin + q.Params["offset"] = strconv.FormatInt(offset, 10) + + r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID), q) + if err != nil { + errCh <- err + return nil, errCh + } + + // Create the output channel + frames := make(chan *StreamFrame, 10) + + go func() { + // Close the body + defer r.Close() + + // Create a decoder + dec := json.NewDecoder(r) + + for { + // Check if we have been cancelled + select { + case <-cancel: + return + default: + } + + // Decode the next frame + var frame StreamFrame + if err := dec.Decode(&frame); err != nil { + errCh <- err + close(frames) + return + } + + // Discard heartbeat frames + if frame.IsHeartbeat() { + continue + } + + frames <- &frame + } + }() + + return frames, errCh +} + +// FrameReader is used to convert a stream of frames into a read closer. +type FrameReader struct { + frames <-chan *StreamFrame + errCh <-chan error + cancelCh chan struct{} + + closedLock sync.Mutex + closed bool + + unblockTime time.Duration + + frame *StreamFrame + frameOffset int + + byteOffset int +} + +// NewFrameReader takes a channel of frames and returns a FrameReader which +// implements io.ReadCloser +func NewFrameReader(frames <-chan *StreamFrame, errCh <-chan error, cancelCh chan struct{}) *FrameReader { + return &FrameReader{ + frames: frames, + errCh: errCh, + cancelCh: cancelCh, + } +} + +// SetUnblockTime sets the time to unblock and return zero bytes read. If the +// duration is unset or is zero or less, the read will block til data is read. +func (f *FrameReader) SetUnblockTime(d time.Duration) { + f.unblockTime = d +} + +// Offset returns the offset into the stream. +func (f *FrameReader) Offset() int { + return f.byteOffset +} + +// Read reads the data of the incoming frames into the bytes buffer. Returns EOF +// when there are no more frames. 
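+//
+// A minimal usage sketch (values are illustrative; assumes frames and errCh
+// were returned by an AllocFS.Stream call made with the same cancel channel,
+// and that "io" and "os" are imported):
+//
+//	cancel := make(chan struct{})
+//	r := NewFrameReader(frames, errCh, cancel)
+//	defer r.Close()
+//	io.Copy(os.Stdout, r)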
+func (f *FrameReader) Read(p []byte) (n int, err error) {
+	f.closedLock.Lock()
+	closed := f.closed
+	f.closedLock.Unlock()
+	if closed {
+		return 0, io.EOF
+	}
+
+	if f.frame == nil {
+		var unblock <-chan time.Time
+		if f.unblockTime.Nanoseconds() > 0 {
+			unblock = time.After(f.unblockTime)
+		}
+
+		select {
+		case frame, ok := <-f.frames:
+			if !ok {
+				return 0, io.EOF
+			}
+			f.frame = frame
+
+			// Store the total offset into the file
+			f.byteOffset = int(f.frame.Offset)
+		case <-unblock:
+			return 0, nil
+		case err := <-f.errCh:
+			return 0, err
+		case <-f.cancelCh:
+			return 0, io.EOF
+		}
+	}
+
+	// Copy the data out of the frame and update our offset
+	n = copy(p, f.frame.Data[f.frameOffset:])
+	f.frameOffset += n
+
+	// Clear the frame and its offset once we have read everything
+	if len(f.frame.Data) == f.frameOffset {
+		f.frame = nil
+		f.frameOffset = 0
+	}
+
+	return n, nil
+}
+
+// Close cancels the stream of frames
+func (f *FrameReader) Close() error {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	if f.closed {
+		return nil
+	}
+
+	close(f.cancelCh)
+	f.closed = true
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go
new file mode 100644
index 000000000..4ec71af4a
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/jobs.go
@@ -0,0 +1,968 @@
+package api
+
+import (
+	"fmt"
+	"net/url"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/gorhill/cronexpr"
+	"github.com/hashicorp/nomad/helper"
+	"github.com/hashicorp/nomad/nomad/structs"
+)
+
+const (
+	// JobTypeService indicates a long-running process
+	JobTypeService = "service"
+
+	// JobTypeBatch indicates a short-lived process
+	JobTypeBatch = "batch"
+
+	// PeriodicSpecCron is used for a cron spec.
+	PeriodicSpecCron = "cron"
+
+	// DefaultNamespace is the default namespace.
+	DefaultNamespace = "default"
+)
+
+const (
+	// RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by
+	// enforcing the job modify index during registers.
+	RegisterEnforceIndexErrPrefix = "Enforcing job modify index"
+)
+
+// Jobs is used to access the job-specific endpoints.
+type Jobs struct {
+	client *Client
+}
+
+// Jobs returns a handle on the jobs endpoints.
+func (c *Client) Jobs() *Jobs {
+	return &Jobs{client: c}
+}
+
+func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) {
+	var resp JobValidateResponse
+	req := &JobValidateRequest{Job: job}
+	if q != nil {
+		req.WriteRequest = WriteRequest{Region: q.Region}
+	}
+	wm, err := j.client.write("/v1/validate/job", req, &resp, q)
+	return &resp, wm, err
+}
+
+// RegisterOptions is used to pass through job registration parameters
+type RegisterOptions struct {
+	EnforceIndex   bool
+	ModifyIndex    uint64
+	PolicyOverride bool
+}
+
+// Register is used to register a new job. It returns the ID
+// of the evaluation, along with any errors encountered.
+func (j *Jobs) Register(job *Job, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
+	return j.RegisterOpts(job, nil, q)
+}
+
+// EnforceRegister is used to register a job enforcing its job modify index.
+func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
+	opts := RegisterOptions{EnforceIndex: true, ModifyIndex: modifyIndex}
+	return j.RegisterOpts(job, &opts, q)
+}
+
+// RegisterOpts is used to register a new job with the passed
+// RegisterOptions. It returns the ID of the evaluation, along with any
+// errors encountered.
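+//
+// A minimal sketch (values are illustrative; assumes a configured client and
+// an existing *Job; error handling elided):
+//
+//	opts := &RegisterOptions{EnforceIndex: true, ModifyIndex: 42}
+//	resp, _, err := client.Jobs().RegisterOpts(job, opts, nil)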
+func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { + // Format the request + req := &RegisterJobRequest{ + Job: job, + } + if opts != nil { + if opts.EnforceIndex { + req.EnforceIndex = true + req.JobModifyIndex = opts.ModifyIndex + } + if opts.PolicyOverride { + req.PolicyOverride = true + } + } + + var resp JobRegisterResponse + wm, err := j.client.write("/v1/jobs", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// List is used to list all of the existing jobs. +func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) { + var resp []*JobListStub + qm, err := j.client.query("/v1/jobs", &resp, q) + if err != nil { + return nil, qm, err + } + sort.Sort(JobIDSort(resp)) + return resp, qm, nil +} + +// PrefixList is used to list all existing jobs that match the prefix. +func (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) { + return j.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to retrieve information about a particular +// job given its unique ID. +func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) { + var resp Job + qm, err := j.client.query("/v1/job/"+jobID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Versions is used to retrieve all versions of a particular job given its +// unique ID. +func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) { + var resp JobVersionsResponse + qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", jobID, diffs), &resp, q) + if err != nil { + return nil, nil, nil, err + } + return resp.Versions, resp.Diffs, qm, nil +} + +// Allocations is used to return the allocs for a given job ID. +func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + u, err := url.Parse("/v1/job/" + jobID + "/allocations") + if err != nil { + return nil, nil, err + } + + v := u.Query() + v.Add("all", strconv.FormatBool(allAllocs)) + u.RawQuery = v.Encode() + + qm, err := j.client.query(u.String(), &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +// Deployments is used to query the deployments associated with the given job +// ID. +func (j *Jobs) Deployments(jobID string, q *QueryOptions) ([]*Deployment, *QueryMeta, error) { + var resp []*Deployment + qm, err := j.client.query("/v1/job/"+jobID+"/deployments", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(DeploymentIndexSort(resp)) + return resp, qm, nil +} + +// LatestDeployment is used to query for the latest deployment associated with +// the given job ID. +func (j *Jobs) LatestDeployment(jobID string, q *QueryOptions) (*Deployment, *QueryMeta, error) { + var resp *Deployment + qm, err := j.client.query("/v1/job/"+jobID+"/deployment", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Evaluations is used to query the evaluations associated with the given job +// ID. +func (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) { + var resp []*Evaluation + qm, err := j.client.query("/v1/job/"+jobID+"/evaluations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(EvalIndexSort(resp)) + return resp, qm, nil +} + +// Deregister is used to remove an existing job. 
If purge is set to true, the job +// is deregistered and purged from the system versus still being queryable and +// eventually GC'ed from the system. Most callers should not specify purge. +func (j *Jobs) Deregister(jobID string, purge bool, q *WriteOptions) (string, *WriteMeta, error) { + var resp JobDeregisterResponse + wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t", jobID, purge), &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +// ForceEvaluate is used to force-evaluate an existing job. +func (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) { + var resp JobRegisterResponse + wm, err := j.client.write("/v1/job/"+jobID+"/evaluate", nil, &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +// PeriodicForce spawns a new instance of the periodic job and returns the eval ID +func (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) { + var resp periodicForceResponse + wm, err := j.client.write("/v1/job/"+jobID+"/periodic/force", nil, &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +// PlanOptions is used to pass through job planning parameters +type PlanOptions struct { + Diff bool + PolicyOverride bool +} + +func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { + opts := PlanOptions{Diff: diff} + return j.PlanOpts(job, &opts, q) +} + +func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { + if job == nil { + return nil, nil, fmt.Errorf("must pass non-nil job") + } + + // Setup the request + req := &JobPlanRequest{ + Job: job, + } + if opts != nil { + req.Diff = opts.Diff + req.PolicyOverride = opts.PolicyOverride + } + + var resp JobPlanResponse + wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) { + var resp JobSummary + qm, err := j.client.query("/v1/job/"+jobID+"/summary", &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +func (j *Jobs) Dispatch(jobID string, meta map[string]string, + payload []byte, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) { + var resp JobDispatchResponse + req := &JobDispatchRequest{ + JobID: jobID, + Meta: meta, + Payload: payload, + } + wm, err := j.client.write("/v1/job/"+jobID+"/dispatch", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Revert is used to revert the given job to the passed version. If +// enforceVersion is set, the job is only reverted if the current version is at +// the passed version. +func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64, + q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { + + var resp JobRegisterResponse + req := &JobRevertRequest{ + JobID: jobID, + JobVersion: version, + EnforcePriorVersion: enforcePriorVersion, + } + wm, err := j.client.write("/v1/job/"+jobID+"/revert", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Stable is used to mark a job version's stability. 
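+//
+// A minimal sketch (job name and version are illustrative; assumes a
+// configured client and elides error handling):
+//
+//	stab, _, err := client.Jobs().Stable("example-job", 3, true, nil)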
+func (j *Jobs) Stable(jobID string, version uint64, stable bool, + q *WriteOptions) (*JobStabilityResponse, *WriteMeta, error) { + + var resp JobStabilityResponse + req := &JobStabilityRequest{ + JobID: jobID, + JobVersion: version, + Stable: stable, + } + wm, err := j.client.write("/v1/job/"+jobID+"/stable", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// periodicForceResponse is used to deserialize a force response +type periodicForceResponse struct { + EvalID string +} + +// UpdateStrategy defines a task groups update strategy. +type UpdateStrategy struct { + Stagger *time.Duration `mapstructure:"stagger"` + MaxParallel *int `mapstructure:"max_parallel"` + HealthCheck *string `mapstructure:"health_check"` + MinHealthyTime *time.Duration `mapstructure:"min_healthy_time"` + HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"` + AutoRevert *bool `mapstructure:"auto_revert"` + Canary *int `mapstructure:"canary"` +} + +func (u *UpdateStrategy) Copy() *UpdateStrategy { + if u == nil { + return nil + } + + copy := new(UpdateStrategy) + + if u.Stagger != nil { + copy.Stagger = helper.TimeToPtr(*u.Stagger) + } + + if u.MaxParallel != nil { + copy.MaxParallel = helper.IntToPtr(*u.MaxParallel) + } + + if u.HealthCheck != nil { + copy.HealthCheck = helper.StringToPtr(*u.HealthCheck) + } + + if u.MinHealthyTime != nil { + copy.MinHealthyTime = helper.TimeToPtr(*u.MinHealthyTime) + } + + if u.HealthyDeadline != nil { + copy.HealthyDeadline = helper.TimeToPtr(*u.HealthyDeadline) + } + + if u.AutoRevert != nil { + copy.AutoRevert = helper.BoolToPtr(*u.AutoRevert) + } + + if u.Canary != nil { + copy.Canary = helper.IntToPtr(*u.Canary) + } + + return copy +} + +func (u *UpdateStrategy) Merge(o *UpdateStrategy) { + if o == nil { + return + } + + if o.Stagger != nil { + u.Stagger = helper.TimeToPtr(*o.Stagger) + } + + if o.MaxParallel != nil { + u.MaxParallel = helper.IntToPtr(*o.MaxParallel) + } + + if o.HealthCheck != nil { + u.HealthCheck = helper.StringToPtr(*o.HealthCheck) + } + + if o.MinHealthyTime != nil { + u.MinHealthyTime = helper.TimeToPtr(*o.MinHealthyTime) + } + + if o.HealthyDeadline != nil { + u.HealthyDeadline = helper.TimeToPtr(*o.HealthyDeadline) + } + + if o.AutoRevert != nil { + u.AutoRevert = helper.BoolToPtr(*o.AutoRevert) + } + + if o.Canary != nil { + u.Canary = helper.IntToPtr(*o.Canary) + } +} + +func (u *UpdateStrategy) Canonicalize() { + d := structs.DefaultUpdateStrategy + + if u.MaxParallel == nil { + u.MaxParallel = helper.IntToPtr(d.MaxParallel) + } + + if u.Stagger == nil { + u.Stagger = helper.TimeToPtr(d.Stagger) + } + + if u.HealthCheck == nil { + u.HealthCheck = helper.StringToPtr(d.HealthCheck) + } + + if u.HealthyDeadline == nil { + u.HealthyDeadline = helper.TimeToPtr(d.HealthyDeadline) + } + + if u.MinHealthyTime == nil { + u.MinHealthyTime = helper.TimeToPtr(d.MinHealthyTime) + } + + if u.AutoRevert == nil { + u.AutoRevert = helper.BoolToPtr(d.AutoRevert) + } + + if u.Canary == nil { + u.Canary = helper.IntToPtr(d.Canary) + } +} + +// Empty returns whether the UpdateStrategy is empty or has user defined values. 
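+//
+// A minimal sketch of how a caller might consult it ("group" is an
+// illustrative *TaskGroup; this mirrors the merge done during task group
+// canonicalization):
+//
+//	if job.Update != nil && !job.Update.Empty() {
+//		group.Update = job.Update.Copy()
+//	}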
+func (u *UpdateStrategy) Empty() bool { + if u == nil { + return true + } + + if u.Stagger != nil && *u.Stagger != 0 { + return false + } + + if u.MaxParallel != nil && *u.MaxParallel != 0 { + return false + } + + if u.HealthCheck != nil && *u.HealthCheck != "" { + return false + } + + if u.MinHealthyTime != nil && *u.MinHealthyTime != 0 { + return false + } + + if u.HealthyDeadline != nil && *u.HealthyDeadline != 0 { + return false + } + + if u.AutoRevert != nil && *u.AutoRevert { + return false + } + + if u.Canary != nil && *u.Canary != 0 { + return false + } + + return true +} + +// PeriodicConfig is for serializing periodic config for a job. +type PeriodicConfig struct { + Enabled *bool + Spec *string + SpecType *string + ProhibitOverlap *bool `mapstructure:"prohibit_overlap"` + TimeZone *string `mapstructure:"time_zone"` +} + +func (p *PeriodicConfig) Canonicalize() { + if p.Enabled == nil { + p.Enabled = helper.BoolToPtr(true) + } + if p.Spec == nil { + p.Spec = helper.StringToPtr("") + } + if p.SpecType == nil { + p.SpecType = helper.StringToPtr(PeriodicSpecCron) + } + if p.ProhibitOverlap == nil { + p.ProhibitOverlap = helper.BoolToPtr(false) + } + if p.TimeZone == nil || *p.TimeZone == "" { + p.TimeZone = helper.StringToPtr("UTC") + } +} + +// Next returns the closest time instant matching the spec that is after the +// passed time. If no matching instance exists, the zero value of time.Time is +// returned. The `time.Location` of the returned value matches that of the +// passed time. +func (p *PeriodicConfig) Next(fromTime time.Time) time.Time { + if *p.SpecType == PeriodicSpecCron { + if e, err := cronexpr.Parse(*p.Spec); err == nil { + return e.Next(fromTime) + } + } + + return time.Time{} +} + +func (p *PeriodicConfig) GetLocation() (*time.Location, error) { + if p.TimeZone == nil || *p.TimeZone == "" { + return time.UTC, nil + } + + return time.LoadLocation(*p.TimeZone) +} + +// ParameterizedJobConfig is used to configure the parameterized job. +type ParameterizedJobConfig struct { + Payload string + MetaRequired []string `mapstructure:"meta_required"` + MetaOptional []string `mapstructure:"meta_optional"` +} + +// Job is used to serialize a job. +type Job struct { + Stop *bool + Region *string + Namespace *string + ID *string + ParentID *string + Name *string + Type *string + Priority *int + AllAtOnce *bool `mapstructure:"all_at_once"` + Datacenters []string + Constraints []*Constraint + TaskGroups []*TaskGroup + Update *UpdateStrategy + Periodic *PeriodicConfig + ParameterizedJob *ParameterizedJobConfig + Payload []byte + Meta map[string]string + VaultToken *string `mapstructure:"vault_token"` + Status *string + StatusDescription *string + Stable *bool + Version *uint64 + SubmitTime *int64 + CreateIndex *uint64 + ModifyIndex *uint64 + JobModifyIndex *uint64 +} + +// IsPeriodic returns whether a job is periodic. +func (j *Job) IsPeriodic() bool { + return j.Periodic != nil +} + +// IsParameterized returns whether a job is parameterized job. 
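+//
+// A minimal sketch (meta values are illustrative; assumes a configured
+// client; error handling elided):
+//
+//	if job.IsParameterized() {
+//		resp, _, _ := client.Jobs().Dispatch(*job.ID, map[string]string{"env": "prod"}, nil, nil)
+//		fmt.Println(resp.DispatchedJobID)
+//	}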
+func (j *Job) IsParameterized() bool { + return j.ParameterizedJob != nil +} + +func (j *Job) Canonicalize() { + if j.ID == nil { + j.ID = helper.StringToPtr("") + } + if j.Name == nil { + j.Name = helper.StringToPtr(*j.ID) + } + if j.ParentID == nil { + j.ParentID = helper.StringToPtr("") + } + if j.Namespace == nil { + j.Namespace = helper.StringToPtr(DefaultNamespace) + } + if j.Priority == nil { + j.Priority = helper.IntToPtr(50) + } + if j.Stop == nil { + j.Stop = helper.BoolToPtr(false) + } + if j.Region == nil { + j.Region = helper.StringToPtr("global") + } + if j.Namespace == nil { + j.Namespace = helper.StringToPtr("default") + } + if j.Type == nil { + j.Type = helper.StringToPtr("service") + } + if j.AllAtOnce == nil { + j.AllAtOnce = helper.BoolToPtr(false) + } + if j.VaultToken == nil { + j.VaultToken = helper.StringToPtr("") + } + if j.Status == nil { + j.Status = helper.StringToPtr("") + } + if j.StatusDescription == nil { + j.StatusDescription = helper.StringToPtr("") + } + if j.Stable == nil { + j.Stable = helper.BoolToPtr(false) + } + if j.Version == nil { + j.Version = helper.Uint64ToPtr(0) + } + if j.CreateIndex == nil { + j.CreateIndex = helper.Uint64ToPtr(0) + } + if j.ModifyIndex == nil { + j.ModifyIndex = helper.Uint64ToPtr(0) + } + if j.JobModifyIndex == nil { + j.JobModifyIndex = helper.Uint64ToPtr(0) + } + if j.Periodic != nil { + j.Periodic.Canonicalize() + } + if j.Update != nil { + j.Update.Canonicalize() + } + + for _, tg := range j.TaskGroups { + tg.Canonicalize(j) + } +} + +// JobSummary summarizes the state of the allocations of a job +type JobSummary struct { + JobID string + Namespace string + Summary map[string]TaskGroupSummary + Children *JobChildrenSummary + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// JobChildrenSummary contains the summary of children job status +type JobChildrenSummary struct { + Pending int64 + Running int64 + Dead int64 +} + +func (jc *JobChildrenSummary) Sum() int { + if jc == nil { + return 0 + } + + return int(jc.Pending + jc.Running + jc.Dead) +} + +// TaskGroup summarizes the state of all the allocations of a particular +// TaskGroup +type TaskGroupSummary struct { + Queued int + Complete int + Failed int + Running int + Starting int + Lost int +} + +// JobListStub is used to return a subset of information about +// jobs during list operations. +type JobListStub struct { + ID string + ParentID string + Name string + Type string + Priority int + Periodic bool + ParameterizedJob bool + Stop bool + Status string + StatusDescription string + JobSummary *JobSummary + CreateIndex uint64 + ModifyIndex uint64 + JobModifyIndex uint64 + SubmitTime int64 +} + +// JobIDSort is used to sort jobs by their job ID's. +type JobIDSort []*JobListStub + +func (j JobIDSort) Len() int { + return len(j) +} + +func (j JobIDSort) Less(a, b int) bool { + return j[a].ID < j[b].ID +} + +func (j JobIDSort) Swap(a, b int) { + j[a], j[b] = j[b], j[a] +} + +// NewServiceJob creates and returns a new service-style job +// for long-lived processes using the provided name, ID, and +// relative job priority. +func NewServiceJob(id, name, region string, pri int) *Job { + return newJob(id, name, region, JobTypeService, pri) +} + +// NewBatchJob creates and returns a new batch-style job for +// short-lived processes using the provided name and ID along +// with the relative job priority. 
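+//
+// A minimal sketch (names, datacenter, and priority are illustrative):
+//
+//	job := NewBatchJob("example-batch", "example-batch", "global", 50)
+//	job.AddDatacenter("dc1").SetMeta("owner", "ops")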
+func NewBatchJob(id, name, region string, pri int) *Job { + return newJob(id, name, region, JobTypeBatch, pri) +} + +// newJob is used to create a new Job struct. +func newJob(id, name, region, typ string, pri int) *Job { + return &Job{ + Region: ®ion, + ID: &id, + Name: &name, + Type: &typ, + Priority: &pri, + } +} + +// SetMeta is used to set arbitrary k/v pairs of metadata on a job. +func (j *Job) SetMeta(key, val string) *Job { + if j.Meta == nil { + j.Meta = make(map[string]string) + } + j.Meta[key] = val + return j +} + +// AddDatacenter is used to add a datacenter to a job. +func (j *Job) AddDatacenter(dc string) *Job { + j.Datacenters = append(j.Datacenters, dc) + return j +} + +// Constrain is used to add a constraint to a job. +func (j *Job) Constrain(c *Constraint) *Job { + j.Constraints = append(j.Constraints, c) + return j +} + +// AddTaskGroup adds a task group to an existing job. +func (j *Job) AddTaskGroup(grp *TaskGroup) *Job { + j.TaskGroups = append(j.TaskGroups, grp) + return j +} + +// AddPeriodicConfig adds a periodic config to an existing job. +func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job { + j.Periodic = cfg + return j +} + +type WriteRequest struct { + // The target region for this write + Region string + + // Namespace is the target namespace for this write + Namespace string + + // SecretID is the secret ID of an ACL token + SecretID string +} + +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobValidateResponse is the response from validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string + + // Error is a string version of any error that may have occurred + Error string + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string +} + +// JobRevertRequest is used to revert a job to a prior version. +type JobRevertRequest struct { + // JobID is the ID of the job being reverted + JobID string + + // JobVersion the version to revert to. + JobVersion uint64 + + // EnforcePriorVersion if set will enforce that the job is at the given + // version before reverting. + EnforcePriorVersion *uint64 + + WriteRequest +} + +// JobUpdateRequest is used to update a job +type JobRegisterRequest struct { + Job *Job + // If EnforceIndex is set then the job will only be registered if the passed + // JobModifyIndex matches the current Jobs index. If the index is zero, the + // register only occurs if the job is new. + EnforceIndex bool + JobModifyIndex uint64 + PolicyOverride bool + + WriteRequest +} + +// RegisterJobRequest is used to serialize a job registration +type RegisterJobRequest struct { + Job *Job + EnforceIndex bool `json:",omitempty"` + JobModifyIndex uint64 `json:",omitempty"` + PolicyOverride bool `json:",omitempty"` +} + +// JobRegisterResponse is used to respond to a job registration +type JobRegisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. 
+ Warnings string + + QueryMeta +} + +// JobDeregisterResponse is used to respond to a job deregistration +type JobDeregisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + QueryMeta +} + +type JobPlanRequest struct { + Job *Job + Diff bool + PolicyOverride bool + WriteRequest +} + +type JobPlanResponse struct { + JobModifyIndex uint64 + CreatedEvals []*Evaluation + Diff *JobDiff + Annotations *PlanAnnotations + FailedTGAllocs map[string]*AllocationMetric + NextPeriodicLaunch time.Time + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string +} + +type JobDiff struct { + Type string + ID string + Fields []*FieldDiff + Objects []*ObjectDiff + TaskGroups []*TaskGroupDiff +} + +type TaskGroupDiff struct { + Type string + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Tasks []*TaskDiff + Updates map[string]uint64 +} + +type TaskDiff struct { + Type string + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Annotations []string +} + +type FieldDiff struct { + Type string + Name string + Old, New string + Annotations []string +} + +type ObjectDiff struct { + Type string + Name string + Fields []*FieldDiff + Objects []*ObjectDiff +} + +type PlanAnnotations struct { + DesiredTGUpdates map[string]*DesiredUpdates +} + +type DesiredUpdates struct { + Ignore uint64 + Place uint64 + Migrate uint64 + Stop uint64 + InPlaceUpdate uint64 + DestructiveUpdate uint64 + Canary uint64 +} + +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string +} + +type JobDispatchResponse struct { + DispatchedJobID string + EvalID string + EvalCreateIndex uint64 + JobCreateIndex uint64 + WriteMeta +} + +// JobVersionsResponse is used for a job get versions request +type JobVersionsResponse struct { + Versions []*Job + Diffs []*JobDiff + QueryMeta +} + +// JobStabilityRequest is used to marked a job as stable. +type JobStabilityRequest struct { + // Job to set the stability on + JobID string + JobVersion uint64 + + // Set the stability + Stable bool + WriteRequest +} + +// JobStabilityResponse is the response when marking a job as stable. 
+type JobStabilityResponse struct { + JobModifyIndex uint64 + WriteMeta +} diff --git a/vendor/github.com/hashicorp/nomad/api/jobs_testing.go b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go new file mode 100644 index 000000000..bed9ac474 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go @@ -0,0 +1,110 @@ +package api + +import ( + "time" + + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" +) + +func MockJob() *Job { + job := &Job{ + Region: helper.StringToPtr("global"), + ID: helper.StringToPtr(structs.GenerateUUID()), + Name: helper.StringToPtr("my-job"), + Type: helper.StringToPtr("service"), + Priority: helper.IntToPtr(50), + AllAtOnce: helper.BoolToPtr(false), + Datacenters: []string{"dc1"}, + Constraints: []*Constraint{ + &Constraint{ + LTarget: "${attr.kernel.name}", + RTarget: "linux", + Operand: "=", + }, + }, + TaskGroups: []*TaskGroup{ + &TaskGroup{ + Name: helper.StringToPtr("web"), + Count: helper.IntToPtr(10), + EphemeralDisk: &EphemeralDisk{ + SizeMB: helper.IntToPtr(150), + }, + RestartPolicy: &RestartPolicy{ + Attempts: helper.IntToPtr(3), + Interval: helper.TimeToPtr(10 * time.Minute), + Delay: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + }, + Tasks: []*Task{ + &Task{ + Name: "web", + Driver: "exec", + Config: map[string]interface{}{ + "command": "/bin/date", + }, + Env: map[string]string{ + "FOO": "bar", + }, + Services: []*Service{ + { + Name: "${TASK}-frontend", + PortLabel: "http", + Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, + Checks: []ServiceCheck{ + { + Name: "check-table", + Type: "script", + Command: "/usr/local/check-table-${meta.database}", + Args: []string{"${meta.version}"}, + Interval: 30 * time.Second, + Timeout: 5 * time.Second, + }, + }, + }, + { + Name: "${TASK}-admin", + PortLabel: "admin", + }, + }, + LogConfig: DefaultLogConfig(), + Resources: &Resources{ + CPU: helper.IntToPtr(500), + MemoryMB: helper.IntToPtr(256), + Networks: []*NetworkResource{ + &NetworkResource{ + MBits: helper.IntToPtr(50), + DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}}, + }, + }, + }, + Meta: map[string]string{ + "foo": "bar", + }, + }, + }, + Meta: map[string]string{ + "elb_check_type": "http", + "elb_check_interval": "30s", + "elb_check_min": "3", + }, + }, + }, + Meta: map[string]string{ + "owner": "armon", + }, + } + job.Canonicalize() + return job +} + +func MockPeriodicJob() *Job { + j := MockJob() + j.Type = helper.StringToPtr("batch") + j.Periodic = &PeriodicConfig{ + Enabled: helper.BoolToPtr(true), + SpecType: helper.StringToPtr("cron"), + Spec: helper.StringToPtr("*/30 * * * *"), + } + return j +} diff --git a/vendor/github.com/hashicorp/nomad/api/namespace.go b/vendor/github.com/hashicorp/nomad/api/namespace.go new file mode 100644 index 000000000..1771d891d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/namespace.go @@ -0,0 +1,90 @@ +package api + +import ( + "fmt" + "sort" +) + +// Namespaces is used to query the namespace endpoints. +type Namespaces struct { + client *Client +} + +// Namespaces returns a new handle on the namespaces. +func (c *Client) Namespaces() *Namespaces { + return &Namespaces{client: c} +} + +// List is used to dump all of the namespaces. 
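+//
+// A minimal sketch (assumes a configured client; error handling elided):
+//
+//	namespaces, _, err := client.Namespaces().List(nil)
+//	for _, ns := range namespaces {
+//		fmt.Println(ns.Name)
+//	}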
+func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) { + var resp []*Namespace + qm, err := n.client.query("/v1/namespaces", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(NamespaceIndexSort(resp)) + return resp, qm, nil +} + +// PrefixList is used to do a PrefixList search over namespaces +func (n *Namespaces) PrefixList(prefix string, q *QueryOptions) ([]*Namespace, *QueryMeta, error) { + if q == nil { + q = &QueryOptions{Prefix: prefix} + } else { + q.Prefix = prefix + } + + return n.List(q) +} + +// Info is used to query a single namespace by its name. +func (n *Namespaces) Info(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) { + var resp Namespace + qm, err := n.client.query("/v1/namespace/"+name, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Register is used to register a namespace. +func (n *Namespaces) Register(namespace *Namespace, q *WriteOptions) (*WriteMeta, error) { + wm, err := n.client.write("/v1/namespace", namespace, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Delete is used to delete a namespace +func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, error) { + wm, err := n.client.delete(fmt.Sprintf("/v1/namespace/%s", namespace), nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Namespace is used to serialize a namespace. +type Namespace struct { + Name string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We +// reverse the test so that we get the highest index first. +type NamespaceIndexSort []*Namespace + +func (n NamespaceIndexSort) Len() int { + return len(n) +} + +func (n NamespaceIndexSort) Less(i, j int) bool { + return n[i].CreateIndex > n[j].CreateIndex +} + +func (n NamespaceIndexSort) Swap(i, j int) { + n[i], n[j] = n[j], n[i] +} diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go new file mode 100644 index 000000000..50a159628 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -0,0 +1,199 @@ +package api + +import ( + "sort" + "strconv" +) + +// Nodes is used to query node-related API endpoints +type Nodes struct { + client *Client +} + +// Nodes returns a handle on the node endpoints. +func (c *Client) Nodes() *Nodes { + return &Nodes{client: c} +} + +// List is used to list out all of the nodes +func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { + var resp NodeIndexSort + qm, err := n.client.query("/v1/nodes", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(NodeIndexSort(resp)) + return resp, qm, nil +} + +func (n *Nodes) PrefixList(prefix string) ([]*NodeListStub, *QueryMeta, error) { + return n.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to query a specific node by its ID. +func (n *Nodes) Info(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error) { + var resp Node + qm, err := n.client.query("/v1/node/"+nodeID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// ToggleDrain is used to toggle drain mode on/off for a given node. 
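+//
+// A minimal sketch (nodeID is illustrative; assumes a configured client and
+// elides error handling):
+//
+//	_, err := client.Nodes().ToggleDrain(nodeID, true, nil)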
+func (n *Nodes) ToggleDrain(nodeID string, drain bool, q *WriteOptions) (*WriteMeta, error) { + drainArg := strconv.FormatBool(drain) + wm, err := n.client.write("/v1/node/"+nodeID+"/drain?enable="+drainArg, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Allocations is used to return the allocations associated with a node. +func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *QueryMeta, error) { + var resp []*Allocation + qm, err := n.client.query("/v1/node/"+nodeID+"/allocations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocationSort(resp)) + return resp, qm, nil +} + +// ForceEvaluate is used to force-evaluate an existing node. +func (n *Nodes) ForceEvaluate(nodeID string, q *WriteOptions) (string, *WriteMeta, error) { + var resp nodeEvalResponse + wm, err := n.client.write("/v1/node/"+nodeID+"/evaluate", nil, &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +func (n *Nodes) Stats(nodeID string, q *QueryOptions) (*HostStats, error) { + nodeClient, err := n.client.GetNodeClient(nodeID, q) + if err != nil { + return nil, err + } + var resp HostStats + if _, err := nodeClient.query("/v1/client/stats", &resp, nil); err != nil { + return nil, err + } + return &resp, nil +} + +func (n *Nodes) GC(nodeID string, q *QueryOptions) error { + nodeClient, err := n.client.GetNodeClient(nodeID, q) + if err != nil { + return err + } + + var resp struct{} + _, err = nodeClient.query("/v1/client/gc", &resp, nil) + return err +} + +// Node is used to deserialize a node entry. +type Node struct { + ID string + Datacenter string + Name string + HTTPAddr string + TLSEnabled bool + Attributes map[string]string + Resources *Resources + Reserved *Resources + Links map[string]string + Meta map[string]string + NodeClass string + Drain bool + Status string + StatusDescription string + StatusUpdatedAt int64 + CreateIndex uint64 + ModifyIndex uint64 +} + +// HostStats represents resource usage stats of the host running a Nomad client +type HostStats struct { + Memory *HostMemoryStats + CPU []*HostCPUStats + DiskStats []*HostDiskStats + Uptime uint64 + CPUTicksConsumed float64 +} + +type HostMemoryStats struct { + Total uint64 + Available uint64 + Used uint64 + Free uint64 +} + +type HostCPUStats struct { + CPU string + User float64 + System float64 + Idle float64 +} + +type HostDiskStats struct { + Device string + Mountpoint string + Size uint64 + Used uint64 + Available uint64 + UsedPercent float64 + InodesUsedPercent float64 +} + +// NodeListStub is a subset of information returned during +// node list operations. +type NodeListStub struct { + ID string + Datacenter string + Name string + NodeClass string + Version string + Drain bool + Status string + StatusDescription string + CreateIndex uint64 + ModifyIndex uint64 +} + +// NodeIndexSort reverse sorts nodes by CreateIndex +type NodeIndexSort []*NodeListStub + +func (n NodeIndexSort) Len() int { + return len(n) +} + +func (n NodeIndexSort) Less(i, j int) bool { + return n[i].CreateIndex > n[j].CreateIndex +} + +func (n NodeIndexSort) Swap(i, j int) { + n[i], n[j] = n[j], n[i] +} + +// nodeEvalResponse is used to decode a force-eval. +type nodeEvalResponse struct { + EvalID string +} + +// AllocationSort reverse sorts allocs by CreateIndex. 
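+//
+// A minimal sketch (allocs as returned by Nodes.Allocations; "sort" is from
+// the standard library):
+//
+//	sort.Sort(AllocationSort(allocs))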
+type AllocationSort []*Allocation
+
+func (a AllocationSort) Len() int {
+	return len(a)
+}
+
+func (a AllocationSort) Less(i, j int) bool {
+	return a[i].CreateIndex > a[j].CreateIndex
+}
+
+func (a AllocationSort) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go
new file mode 100644
index 000000000..a10648a29
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/operator.go
@@ -0,0 +1,87 @@
+package api
+
+// Operator can be used to perform low-level operator tasks for Nomad.
+type Operator struct {
+	c *Client
+}
+
+// Operator returns a handle to the operator endpoints.
+func (c *Client) Operator() *Operator {
+	return &Operator{c}
+}
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Nomad.
+	ID string
+
+	// Node is the node name of the server, as known by Nomad, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Nomad.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r, err := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	if err != nil {
+		return nil, err
+	}
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	if err != nil {
+		return err
+	}
+	r.setWriteOptions(q)
+
+	// TODO (alexdadgar) Currently we made address a query parameter. Once
+	// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
+ r.params.Set("address", string(address)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/raw.go b/vendor/github.com/hashicorp/nomad/api/raw.go new file mode 100644 index 000000000..9369829c5 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/raw.go @@ -0,0 +1,38 @@ +package api + +import "io" + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Nomad conventions. +func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + return raw.c.query(endpoint, out, q) +} + +// Response is used to make a GET request against an endpoint and returns the +// response body +func (raw *Raw) Response(endpoint string, q *QueryOptions) (io.ReadCloser, error) { + return raw.c.rawQuery(endpoint, q) +} + +// Write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. +func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.write(endpoint, in, out, q) +} + +// Delete is used to do a DELETE request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. +func (raw *Raw) Delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.delete(endpoint, out, q) +} diff --git a/vendor/github.com/hashicorp/nomad/api/regions.go b/vendor/github.com/hashicorp/nomad/api/regions.go new file mode 100644 index 000000000..c94ce297a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/regions.go @@ -0,0 +1,23 @@ +package api + +import "sort" + +// Regions is used to query the regions in the cluster. +type Regions struct { + client *Client +} + +// Regions returns a handle on the regions endpoints. +func (c *Client) Regions() *Regions { + return &Regions{client: c} +} + +// List returns a list of all of the regions. +func (r *Regions) List() ([]string, error) { + var resp []string + if _, err := r.client.query("/v1/regions", &resp, nil); err != nil { + return nil, err + } + sort.Strings(resp) + return resp, nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go new file mode 100644 index 000000000..8d3f27c6c --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/resources.go @@ -0,0 +1,81 @@ +package api + +import "github.com/hashicorp/nomad/helper" + +// Resources encapsulates the required resources of +// a given task or task group. +type Resources struct { + CPU *int + MemoryMB *int `mapstructure:"memory"` + DiskMB *int `mapstructure:"disk"` + IOPS *int + Networks []*NetworkResource +} + +func (r *Resources) Canonicalize() { + if r.CPU == nil { + r.CPU = helper.IntToPtr(100) + } + if r.MemoryMB == nil { + r.MemoryMB = helper.IntToPtr(10) + } + if r.IOPS == nil { + r.IOPS = helper.IntToPtr(0) + } + for _, n := range r.Networks { + n.Canonicalize() + } +} + +func MinResources() *Resources { + return &Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + IOPS: helper.IntToPtr(0), + } + +} + +// Merge merges this resource with another resource. 
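+//
+// A minimal sketch (helper is github.com/hashicorp/nomad/helper; values are
+// illustrative):
+//
+//	r := MinResources()
+//	r.Merge(&Resources{CPU: helper.IntToPtr(500), MemoryMB: helper.IntToPtr(256)})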
+func (r *Resources) Merge(other *Resources) { + if other == nil { + return + } + if other.CPU != nil { + r.CPU = other.CPU + } + if other.MemoryMB != nil { + r.MemoryMB = other.MemoryMB + } + if other.DiskMB != nil { + r.DiskMB = other.DiskMB + } + if other.IOPS != nil { + r.IOPS = other.IOPS + } + if len(other.Networks) != 0 { + r.Networks = other.Networks + } +} + +type Port struct { + Label string + Value int `mapstructure:"static"` +} + +// NetworkResource is used to describe required network +// resources of a given task. +type NetworkResource struct { + Device string + CIDR string + IP string + MBits *int + ReservedPorts []Port + DynamicPorts []Port +} + +func (n *NetworkResource) Canonicalize() { + if n.MBits == nil { + n.MBits = helper.IntToPtr(10) + } +} diff --git a/vendor/github.com/hashicorp/nomad/api/search.go b/vendor/github.com/hashicorp/nomad/api/search.go new file mode 100644 index 000000000..6a6cb9b59 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/search.go @@ -0,0 +1,39 @@ +package api + +import ( + "github.com/hashicorp/nomad/api/contexts" +) + +type Search struct { + client *Client +} + +// Search returns a handle on the Search endpoints +func (c *Client) Search() *Search { + return &Search{client: c} +} + +// PrefixSearch returns a list of matches for a particular context and prefix. +func (s *Search) PrefixSearch(prefix string, context contexts.Context, q *QueryOptions) (*SearchResponse, *QueryMeta, error) { + var resp SearchResponse + req := &SearchRequest{Prefix: prefix, Context: context} + + qm, err := s.client.putQuery("/v1/search", req, &resp, q) + if err != nil { + return nil, nil, err + } + + return &resp, qm, nil +} + +type SearchRequest struct { + Prefix string + Context contexts.Context + QueryOptions +} + +type SearchResponse struct { + Matches map[contexts.Context][]string + Truncations map[contexts.Context]bool + QueryMeta +} diff --git a/vendor/github.com/hashicorp/nomad/api/sentinel.go b/vendor/github.com/hashicorp/nomad/api/sentinel.go new file mode 100644 index 000000000..c1e52c7cb --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/sentinel.go @@ -0,0 +1,79 @@ +package api + +import "fmt" + +// SentinelPolicies is used to query the Sentinel Policy endpoints. +type SentinelPolicies struct { + client *Client +} + +// SentinelPolicies returns a new handle on the Sentinel policies. +func (c *Client) SentinelPolicies() *SentinelPolicies { + return &SentinelPolicies{client: c} +} + +// List is used to dump all of the policies. 
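+//
+// A minimal sketch (assumes a configured client; error handling elided):
+//
+//	policies, _, err := client.SentinelPolicies().List(nil)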
+func (a *SentinelPolicies) List(q *QueryOptions) ([]*SentinelPolicyListStub, *QueryMeta, error) {
+	var resp []*SentinelPolicyListStub
+	qm, err := a.client.query("/v1/sentinel/policies", &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, qm, nil
+}
+
+// Upsert is used to create or update a policy
+func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*WriteMeta, error) {
+	if policy == nil || policy.Name == "" {
+		return nil, fmt.Errorf("missing policy name")
+	}
+	wm, err := a.client.write("/v1/sentinel/policy/"+policy.Name, policy, nil, q)
+	if err != nil {
+		return nil, err
+	}
+	return wm, nil
+}
+
+// Delete is used to delete a policy
+func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) {
+	if policyName == "" {
+		return nil, fmt.Errorf("missing policy name")
+	}
+	wm, err := a.client.delete("/v1/sentinel/policy/"+policyName, nil, q)
+	if err != nil {
+		return nil, err
+	}
+	return wm, nil
+}
+
+// Info is used to query a specific policy
+func (a *SentinelPolicies) Info(policyName string, q *QueryOptions) (*SentinelPolicy, *QueryMeta, error) {
+	if policyName == "" {
+		return nil, nil, fmt.Errorf("missing policy name")
+	}
+	var resp SentinelPolicy
+	wm, err := a.client.query("/v1/sentinel/policy/"+policyName, &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &resp, wm, nil
+}
+
+type SentinelPolicy struct {
+	Name             string
+	Description      string
+	Scope            string
+	EnforcementLevel string
+	Policy           string
+	CreateIndex      uint64
+	ModifyIndex      uint64
+}
+
+type SentinelPolicyListStub struct {
+	Name             string
+	Description      string
+	Scope            string
+	EnforcementLevel string
+	CreateIndex      uint64
+	ModifyIndex      uint64
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/status.go b/vendor/github.com/hashicorp/nomad/api/status.go
new file mode 100644
index 000000000..da1cb4c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status is used to query the status-related endpoints.
+type Status struct {
+	client *Client
+}
+
+// Status returns a handle on the status endpoints.
+func (c *Client) Status() *Status {
+	return &Status{client: c}
+}
+
+// Leader is used to query for the current cluster leader.
+func (s *Status) Leader() (string, error) {
+	var resp string
+	_, err := s.client.query("/v1/status/leader", &resp, nil)
+	if err != nil {
+		return "", err
+	}
+	return resp, nil
+}
+
+// RegionLeader is used to query for the leader in the passed region.
+func (s *Status) RegionLeader(region string) (string, error) {
+	var resp string
+	q := QueryOptions{Region: region}
+	_, err := s.client.query("/v1/status/leader", &resp, &q)
+	if err != nil {
+		return "", err
+	}
+	return resp, nil
+}
+
+// Peers is used to query the addresses of the server peers
+// in the cluster.
+func (s *Status) Peers() ([]string, error) {
+	var resp []string
+	_, err := s.client.query("/v1/status/peers", &resp, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/system.go b/vendor/github.com/hashicorp/nomad/api/system.go
new file mode 100644
index 000000000..3717b9aea
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/system.go
@@ -0,0 +1,23 @@
+package api
+
+// System is used to query the system-related endpoints.
+type System struct {
+	client *Client
+}
+
+// System returns a handle on the system endpoints.
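+//
+// A minimal sketch (assumes a configured client; error handling elided):
+//
+//	err := client.System().GarbageCollect()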
+func (c *Client) System() *System { + return &System{client: c} +} + +func (s *System) GarbageCollect() error { + var req struct{} + _, err := s.client.write("/v1/system/gc", &req, nil, nil) + return err +} + +func (s *System) ReconcileSummaries() error { + var req struct{} + _, err := s.client.write("/v1/system/reconcile/summaries", &req, nil, nil) + return err +} diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go new file mode 100644 index 000000000..a3d10831e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -0,0 +1,617 @@ +package api + +import ( + "fmt" + "path" + + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/nomad/helper" +) + +// MemoryStats holds memory usage related stats +type MemoryStats struct { + RSS uint64 + Cache uint64 + Swap uint64 + MaxUsage uint64 + KernelUsage uint64 + KernelMaxUsage uint64 + Measured []string +} + +// CpuStats holds cpu usage related stats +type CpuStats struct { + SystemMode float64 + UserMode float64 + TotalTicks float64 + ThrottledPeriods uint64 + ThrottledTime uint64 + Percent float64 + Measured []string +} + +// ResourceUsage holds information related to cpu and memory stats +type ResourceUsage struct { + MemoryStats *MemoryStats + CpuStats *CpuStats +} + +// TaskResourceUsage holds aggregated resource usage of all processes in a Task +// and the resource usage of the individual pids +type TaskResourceUsage struct { + ResourceUsage *ResourceUsage + Timestamp int64 + Pids map[string]*ResourceUsage +} + +// AllocResourceUsage holds the aggregated task resource usage of the +// allocation. +type AllocResourceUsage struct { + ResourceUsage *ResourceUsage + Tasks map[string]*TaskResourceUsage + Timestamp int64 +} + +// RestartPolicy defines how the Nomad client restarts +// tasks in a taskgroup when they fail +type RestartPolicy struct { + Interval *time.Duration + Attempts *int + Delay *time.Duration + Mode *string +} + +func (r *RestartPolicy) Merge(rp *RestartPolicy) { + if rp.Interval != nil { + r.Interval = rp.Interval + } + if rp.Attempts != nil { + r.Attempts = rp.Attempts + } + if rp.Delay != nil { + r.Delay = rp.Delay + } + if rp.Mode != nil { + r.Mode = rp.Mode + } +} + +// CheckRestart describes if and when a task should be restarted based on +// failing health checks. +type CheckRestart struct { + Limit int `mapstructure:"limit"` + Grace *time.Duration `mapstructure:"grace_period"` + IgnoreWarnings bool `mapstructure:"ignore_warnings"` +} + +// Canonicalize CheckRestart fields if not nil. +func (c *CheckRestart) Canonicalize() { + if c == nil { + return + } + + if c.Grace == nil { + c.Grace = helper.TimeToPtr(1 * time.Second) + } +} + +// Copy returns a copy of CheckRestart or nil if unset. +func (c *CheckRestart) Copy() *CheckRestart { + if c == nil { + return nil + } + + nc := new(CheckRestart) + nc.Limit = c.Limit + if c.Grace != nil { + g := *c.Grace + nc.Grace = &g + } + nc.IgnoreWarnings = c.IgnoreWarnings + return nc +} + +// Merge values from other CheckRestart over default values on this +// CheckRestart and return merged copy. 
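+//
+// A minimal sketch ("check" and "service" are illustrative; this mirrors the
+// merge done during service canonicalization):
+//
+//	check.CheckRestart = check.CheckRestart.Merge(service.CheckRestart)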
+func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart { + if c == nil { + // Just return other + return o + } + + nc := c.Copy() + + if o == nil { + // Nothing to merge + return nc + } + + if nc.Limit == 0 { + nc.Limit = o.Limit + } + + if nc.Grace == nil { + nc.Grace = o.Grace + } + + if nc.IgnoreWarnings { + nc.IgnoreWarnings = o.IgnoreWarnings + } + + return nc +} + +// The ServiceCheck data model represents the consul health check that +// Nomad registers for a Task +type ServiceCheck struct { + Id string + Name string + Type string + Command string + Args []string + Path string + Protocol string + PortLabel string `mapstructure:"port"` + Interval time.Duration + Timeout time.Duration + InitialStatus string `mapstructure:"initial_status"` + TLSSkipVerify bool `mapstructure:"tls_skip_verify"` + Header map[string][]string + Method string + CheckRestart *CheckRestart `mapstructure:"check_restart"` +} + +// The Service model represents a Consul service definition +type Service struct { + Id string + Name string + Tags []string + PortLabel string `mapstructure:"port"` + AddressMode string `mapstructure:"address_mode"` + Checks []ServiceCheck + CheckRestart *CheckRestart `mapstructure:"check_restart"` +} + +func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) { + if s.Name == "" { + s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name) + } + + // Default to AddressModeAuto + if s.AddressMode == "" { + s.AddressMode = "auto" + } + + s.CheckRestart.Canonicalize() + + // Canonicallize CheckRestart on Checks and merge Service.CheckRestart + // into each check. + for _, c := range s.Checks { + c.CheckRestart.Canonicalize() + c.CheckRestart = c.CheckRestart.Merge(s.CheckRestart) + } +} + +// EphemeralDisk is an ephemeral disk object +type EphemeralDisk struct { + Sticky *bool + Migrate *bool + SizeMB *int `mapstructure:"size"` +} + +func DefaultEphemeralDisk() *EphemeralDisk { + return &EphemeralDisk{ + Sticky: helper.BoolToPtr(false), + Migrate: helper.BoolToPtr(false), + SizeMB: helper.IntToPtr(300), + } +} + +func (e *EphemeralDisk) Canonicalize() { + if e.Sticky == nil { + e.Sticky = helper.BoolToPtr(false) + } + if e.Migrate == nil { + e.Migrate = helper.BoolToPtr(false) + } + if e.SizeMB == nil { + e.SizeMB = helper.IntToPtr(300) + } +} + +// TaskGroup is the unit of scheduling. +type TaskGroup struct { + Name *string + Count *int + Constraints []*Constraint + Tasks []*Task + RestartPolicy *RestartPolicy + EphemeralDisk *EphemeralDisk + Update *UpdateStrategy + Meta map[string]string +} + +// NewTaskGroup creates a new TaskGroup. +func NewTaskGroup(name string, count int) *TaskGroup { + return &TaskGroup{ + Name: helper.StringToPtr(name), + Count: helper.IntToPtr(count), + } +} + +func (g *TaskGroup) Canonicalize(job *Job) { + if g.Name == nil { + g.Name = helper.StringToPtr("") + } + if g.Count == nil { + g.Count = helper.IntToPtr(1) + } + for _, t := range g.Tasks { + t.Canonicalize(g, job) + } + if g.EphemeralDisk == nil { + g.EphemeralDisk = DefaultEphemeralDisk() + } else { + g.EphemeralDisk.Canonicalize() + } + + // Merge the update policy from the job + if ju, tu := job.Update != nil, g.Update != nil; ju && tu { + // Merge the jobs and task groups definition of the update strategy + jc := job.Update.Copy() + jc.Merge(g.Update) + g.Update = jc + } else if ju && !job.Update.Empty() { + // Inherit the jobs as long as it is non-empty. 
+ jc := job.Update.Copy() + g.Update = jc + } + + if g.Update != nil { + g.Update.Canonicalize() + } + + var defaultRestartPolicy *RestartPolicy + switch *job.Type { + case "service", "system": + defaultRestartPolicy = &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(2), + Interval: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + } + default: + defaultRestartPolicy = &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(15), + Interval: helper.TimeToPtr(7 * 24 * time.Hour), + Mode: helper.StringToPtr("delay"), + } + } + + if g.RestartPolicy != nil { + defaultRestartPolicy.Merge(g.RestartPolicy) + } + g.RestartPolicy = defaultRestartPolicy +} + +// Constrain is used to add a constraint to a task group. +func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup { + g.Constraints = append(g.Constraints, c) + return g +} + +// AddMeta is used to add a meta k/v pair to a task group +func (g *TaskGroup) SetMeta(key, val string) *TaskGroup { + if g.Meta == nil { + g.Meta = make(map[string]string) + } + g.Meta[key] = val + return g +} + +// AddTask is used to add a new task to a task group. +func (g *TaskGroup) AddTask(t *Task) *TaskGroup { + g.Tasks = append(g.Tasks, t) + return g +} + +// RequireDisk adds a ephemeral disk to the task group +func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup { + g.EphemeralDisk = disk + return g +} + +// LogConfig provides configuration for log rotation +type LogConfig struct { + MaxFiles *int `mapstructure:"max_files"` + MaxFileSizeMB *int `mapstructure:"max_file_size"` +} + +func DefaultLogConfig() *LogConfig { + return &LogConfig{ + MaxFiles: helper.IntToPtr(10), + MaxFileSizeMB: helper.IntToPtr(10), + } +} + +func (l *LogConfig) Canonicalize() { + if l.MaxFiles == nil { + l.MaxFiles = helper.IntToPtr(10) + } + if l.MaxFileSizeMB == nil { + l.MaxFileSizeMB = helper.IntToPtr(10) + } +} + +// DispatchPayloadConfig configures how a task gets its input from a job dispatch +type DispatchPayloadConfig struct { + File string +} + +// Task is a single process in a task group. +type Task struct { + Name string + Driver string + User string + Config map[string]interface{} + Constraints []*Constraint + Env map[string]string + Services []*Service + Resources *Resources + Meta map[string]string + KillTimeout *time.Duration `mapstructure:"kill_timeout"` + LogConfig *LogConfig `mapstructure:"logs"` + Artifacts []*TaskArtifact + Vault *Vault + Templates []*Template + DispatchPayload *DispatchPayloadConfig + Leader bool + ShutdownDelay time.Duration `mapstructure:"shutdown_delay"` +} + +func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { + min := MinResources() + min.Merge(t.Resources) + min.Canonicalize() + t.Resources = min + + if t.KillTimeout == nil { + t.KillTimeout = helper.TimeToPtr(5 * time.Second) + } + if t.LogConfig == nil { + t.LogConfig = DefaultLogConfig() + } else { + t.LogConfig.Canonicalize() + } + for _, artifact := range t.Artifacts { + artifact.Canonicalize() + } + if t.Vault != nil { + t.Vault.Canonicalize() + } + for _, tmpl := range t.Templates { + tmpl.Canonicalize() + } + for _, s := range t.Services { + s.Canonicalize(t, tg, job) + } +} + +// TaskArtifact is used to download artifacts before running a task. 
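+//
+// A minimal sketch (the URL is illustrative; helper is
+// github.com/hashicorp/nomad/helper):
+//
+//	t.Artifacts = append(t.Artifacts, &TaskArtifact{
+//		GetterSource: helper.StringToPtr("https://example.com/app.tar.gz"),
+//	})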
+
+// TaskArtifact is used to download artifacts before running a task.
+type TaskArtifact struct {
+	GetterSource  *string           `mapstructure:"source"`
+	GetterOptions map[string]string `mapstructure:"options"`
+	GetterMode    *string           `mapstructure:"mode"`
+	RelativeDest  *string           `mapstructure:"destination"`
+}
+
+func (a *TaskArtifact) Canonicalize() {
+	if a.GetterMode == nil {
+		a.GetterMode = helper.StringToPtr("any")
+	}
+	if a.GetterSource == nil {
+		// Shouldn't be possible, but we don't want to panic
+		a.GetterSource = helper.StringToPtr("")
+	}
+	if a.RelativeDest == nil {
+		switch *a.GetterMode {
+		case "file":
+			// File mode should default to local/filename
+			dest := *a.GetterSource
+			dest = path.Base(dest)
+			dest = filepath.Join("local", dest)
+			a.RelativeDest = &dest
+		default:
+			// Default to a directory
+			a.RelativeDest = helper.StringToPtr("local/")
+		}
+	}
+}
+
+type Template struct {
+	SourcePath   *string        `mapstructure:"source"`
+	DestPath     *string        `mapstructure:"destination"`
+	EmbeddedTmpl *string        `mapstructure:"data"`
+	ChangeMode   *string        `mapstructure:"change_mode"`
+	ChangeSignal *string        `mapstructure:"change_signal"`
+	Splay        *time.Duration `mapstructure:"splay"`
+	Perms        *string        `mapstructure:"perms"`
+	LeftDelim    *string        `mapstructure:"left_delimiter"`
+	RightDelim   *string        `mapstructure:"right_delimiter"`
+	Envvars      *bool          `mapstructure:"env"`
+	VaultGrace   *time.Duration `mapstructure:"vault_grace"`
+}
+
+func (tmpl *Template) Canonicalize() {
+	if tmpl.SourcePath == nil {
+		tmpl.SourcePath = helper.StringToPtr("")
+	}
+	if tmpl.DestPath == nil {
+		tmpl.DestPath = helper.StringToPtr("")
+	}
+	if tmpl.EmbeddedTmpl == nil {
+		tmpl.EmbeddedTmpl = helper.StringToPtr("")
+	}
+	if tmpl.ChangeMode == nil {
+		tmpl.ChangeMode = helper.StringToPtr("restart")
+	}
+	if tmpl.ChangeSignal == nil {
+		if *tmpl.ChangeMode == "signal" {
+			tmpl.ChangeSignal = helper.StringToPtr("SIGHUP")
+		} else {
+			tmpl.ChangeSignal = helper.StringToPtr("")
+		}
+	} else {
+		sig := *tmpl.ChangeSignal
+		tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig))
+	}
+	if tmpl.Splay == nil {
+		tmpl.Splay = helper.TimeToPtr(5 * time.Second)
+	}
+	if tmpl.Perms == nil {
+		tmpl.Perms = helper.StringToPtr("0644")
+	}
+	if tmpl.LeftDelim == nil {
+		tmpl.LeftDelim = helper.StringToPtr("{{")
+	}
+	if tmpl.RightDelim == nil {
+		tmpl.RightDelim = helper.StringToPtr("}}")
+	}
+	if tmpl.Envvars == nil {
+		tmpl.Envvars = helper.BoolToPtr(false)
+	}
+	if tmpl.VaultGrace == nil {
+		tmpl.VaultGrace = helper.TimeToPtr(5 * time.Minute)
+	}
+}
+
+type Vault struct {
+	Policies     []string
+	Env          *bool
+	ChangeMode   *string `mapstructure:"change_mode"`
+	ChangeSignal *string `mapstructure:"change_signal"`
+}
+
+func (v *Vault) Canonicalize() {
+	if v.Env == nil {
+		v.Env = helper.BoolToPtr(true)
+	}
+	if v.ChangeMode == nil {
+		v.ChangeMode = helper.StringToPtr("restart")
+	}
+	if v.ChangeSignal == nil {
+		v.ChangeSignal = helper.StringToPtr("SIGHUP")
+	}
+}
+
+// NewTask creates and initializes a new Task.
+func NewTask(name, driver string) *Task {
+	return &Task{
+		Name:   name,
+		Driver: driver,
+	}
+}
+
+// SetConfig is used to set a single k/v pair of configuration on
+// the task.
+func (t *Task) SetConfig(key string, val interface{}) *Task {
+	if t.Config == nil {
+		t.Config = make(map[string]interface{})
+	}
+	t.Config[key] = val
+	return t
+}
+
+// SetMeta is used to add metadata k/v pairs to the task.
+func (t *Task) SetMeta(key, val string) *Task {
+	if t.Meta == nil {
+		t.Meta = make(map[string]string)
+	}
+	t.Meta[key] = val
+	return t
+}
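+
+// Editorial sketch (not part of the upstream file): Template.Canonicalize
+// above fills every unset pointer, e.g. a "signal" change mode defaults
+// its signal to SIGHUP:
+//
+//	tmpl := &Template{ChangeMode: helper.StringToPtr("signal")}
+//	tmpl.Canonicalize()
+//	// *tmpl.ChangeSignal == "SIGHUP", *tmpl.Perms == "0644",
+//	// *tmpl.LeftDelim == "{{", *tmpl.RightDelim == "}}"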
+
+// Require is used to add resource requirements to a task.
+func (t *Task) Require(r *Resources) *Task {
+	t.Resources = r
+	return t
+}
+
+// Constrain adds a new constraint to a single task.
+func (t *Task) Constrain(c *Constraint) *Task {
+	t.Constraints = append(t.Constraints, c)
+	return t
+}
+
+// SetLogConfig sets the log config on a task
+func (t *Task) SetLogConfig(l *LogConfig) *Task {
+	t.LogConfig = l
+	return t
+}
+
+// TaskState tracks the current state of a task and events that caused state
+// transitions.
+type TaskState struct {
+	State       string
+	Failed      bool
+	Restarts    uint64
+	LastRestart time.Time
+	StartedAt   time.Time
+	FinishedAt  time.Time
+	Events      []*TaskEvent
+}
+
+const (
+	TaskSetup                  = "Task Setup"
+	TaskSetupFailure           = "Setup Failure"
+	TaskDriverFailure          = "Driver Failure"
+	TaskDriverMessage          = "Driver"
+	TaskReceived               = "Received"
+	TaskFailedValidation       = "Failed Validation"
+	TaskStarted                = "Started"
+	TaskTerminated             = "Terminated"
+	TaskKilling                = "Killing"
+	TaskKilled                 = "Killed"
+	TaskRestarting             = "Restarting"
+	TaskNotRestarting          = "Not Restarting"
+	TaskDownloadingArtifacts   = "Downloading Artifacts"
+	TaskArtifactDownloadFailed = "Failed Artifact Download"
+	TaskSiblingFailed          = "Sibling Task Failed"
+	TaskSignaling              = "Signaling"
+	TaskRestartSignal          = "Restart Signaled"
+	TaskLeaderDead             = "Leader Task Dead"
+	TaskBuildingTaskDir        = "Building Task Directory"
+	TaskGenericMessage         = "Generic"
+)
+
+// TaskEvent is an event that affects the state of a task and contains
+// metadata appropriate to the event's type.
+type TaskEvent struct {
+	Type             string
+	Time             int64
+	FailsTask        bool
+	RestartReason    string
+	SetupError       string
+	DriverError      string
+	DriverMessage    string
+	ExitCode         int
+	Signal           int
+	Message          string
+	KillReason       string
+	KillTimeout      time.Duration
+	KillError        string
+	StartDelay       int64
+	DownloadError    string
+	ValidationError  string
+	DiskLimit        int64
+	DiskSize         int64
+	FailedSibling    string
+	VaultError       string
+	TaskSignalReason string
+	TaskSignal       string
+	GenericSource    string
+}

From 57fc8e11da63f0c6c0f9bd8ff2c3de233402ac7f Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Mon, 2 Oct 2017 16:48:23 +0100
Subject: [PATCH 13/52] Added nomad as dependency after include fix

---
 vendor/vendor.json | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/vendor/vendor.json b/vendor/vendor.json
index 2ccc97e6d..ce23fa219 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1144,8 +1144,7 @@
 			"checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=",
 			"path": "github.com/hashicorp/nomad/api",
 			"revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e",
-			"revisionTime": "2017-09-20T19:48:06Z",
-			"version": "v0.7.0-beta1"
+			"revisionTime": "2017-09-20T19:48:06Z"
 		},
 		{
 			"checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=",

From 4fbf9253a2e641b0417acc521f1b0e8974a3edf7 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Mon, 2 Oct 2017 16:56:39 +0100
Subject: [PATCH 14/52] Adding further nomad deps

---
 vendor/github.com/gorhill/cronexpr/APLv2      |  202 +
 vendor/github.com/gorhill/cronexpr/GPLv3      |  674 ++
 vendor/github.com/gorhill/cronexpr/README.md  |  134 +
 .../github.com/gorhill/cronexpr/cronexpr.go   |  266 +
 .../gorhill/cronexpr/cronexpr_next.go         |  292 +
 .../gorhill/cronexpr/cronexpr_parse.go        |  498 ++
 .../hashicorp/nomad/api/contexts/contexts.go  |   14 +
 .../hashicorp/nomad/helper/funcs.go           |  272 +
 .../hashicorp/nomad/nomad/structs/bitmap.go   |   78 +
 .../hashicorp/nomad/nomad/structs/diff.go     | 1231 ++++
 .../hashicorp/nomad/nomad/structs/funcs.go    |  310 +
 .../hashicorp/nomad/nomad/structs/network.go  |  326 +
.../nomad/nomad/structs/node_class.go | 94 + .../hashicorp/nomad/nomad/structs/operator.go | 49 + .../hashicorp/nomad/nomad/structs/structs.go | 5783 +++++++++++++++++ .../nomad/nomad/structs/structs_codegen.go | 3 + vendor/vendor.json | 24 + 17 files changed, 10250 insertions(+) create mode 100644 vendor/github.com/gorhill/cronexpr/APLv2 create mode 100644 vendor/github.com/gorhill/cronexpr/GPLv3 create mode 100644 vendor/github.com/gorhill/cronexpr/README.md create mode 100644 vendor/github.com/gorhill/cronexpr/cronexpr.go create mode 100644 vendor/github.com/gorhill/cronexpr/cronexpr_next.go create mode 100644 vendor/github.com/gorhill/cronexpr/cronexpr_parse.go create mode 100644 vendor/github.com/hashicorp/nomad/api/contexts/contexts.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/funcs.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/diff.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/network.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/operator.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go diff --git a/vendor/github.com/gorhill/cronexpr/APLv2 b/vendor/github.com/gorhill/cronexpr/APLv2 new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/APLv2 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gorhill/cronexpr/GPLv3 b/vendor/github.com/gorhill/cronexpr/GPLv3 new file mode 100644 index 000000000..c13fcfaf1 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/GPLv3 @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/} + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see {http://www.gnu.org/licenses/}. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + cronexpr Copyright (C) 2013 Raymond Hill + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +{http://www.gnu.org/licenses/}. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+{http://www.gnu.org/philosophy/why-not-lgpl.html}.
diff --git a/vendor/github.com/gorhill/cronexpr/README.md b/vendor/github.com/gorhill/cronexpr/README.md
new file mode 100644
index 000000000..e8c56d29d
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/README.md
@@ -0,0 +1,134 @@
+Golang Cron expression parser
+=============================
+Given a cron expression and a time stamp, you can get the next time stamp which satisfies the cron expression.
+
+In another project, I decided to use cron expression syntax to encode scheduling information. Thus this standalone library to parse cron expressions and apply them to time stamps.
+
+The time-matching algorithm in this implementation is efficient: as much as possible, it avoids guessing the next matching time stamp, a technique common in a number of other implementations.
+
+There is also a companion command-line utility to evaluate cron time expressions (which, of course, uses this library).
+
+Implementation
+--------------
+The reference documentation for this implementation is found at
+<https://en.wikipedia.org/wiki/Cron#CRON_expression>, which I copy/pasted here (laziness!) with modifications where this implementation differs:
+
+    Field name     Mandatory?   Allowed values    Allowed special characters
+    ----------     ----------   --------------    --------------------------
+    Seconds        No           0-59              * / , -
+    Minutes        Yes          0-59              * / , -
+    Hours          Yes          0-23              * / , -
+    Day of month   Yes          1-31              * / , - L W
+    Month          Yes          1-12 or JAN-DEC   * / , -
+    Day of week    Yes          0-6 or SUN-SAT    * / , - L #
+    Year           No           1970-2099         * / , -
+
+#### Asterisk ( * )
+The asterisk indicates that the cron expression matches all values of the field. E.g., using an asterisk in the 4th field (month) indicates every month.
+
+#### Slash ( / )
+Slashes describe increments of ranges. For example `3-59/15` in the minute field indicates the third minute of the hour and every 15 minutes thereafter. The form `*/...` is equivalent to the form "first-last/...", that is, an increment over the largest possible range of the field.
+
+#### Comma ( , )
+Commas are used to separate items of a list. For example, using `MON,WED,FRI` in the 5th field (day of week) means Mondays, Wednesdays and Fridays.
+
+#### Hyphen ( - )
+Hyphens define ranges. For example, 2000-2010 indicates every year between 2000 and 2010 AD, inclusive.
+
+#### L
+`L` stands for "last". When used in the day-of-week field, it allows you to specify constructs such as "the last Friday" (`5L`) of a given month. In the day-of-month field, it specifies the last day of the month.
+
+#### W
+The `W` character is allowed for the day-of-month field. This character is used to specify the business day (Monday-Friday) nearest the given day. As an example, if you were to specify `15W` as the value for the day-of-month field, the meaning is: "the nearest business day to the 15th of the month."
+
+So, if the 15th is a Saturday, the trigger fires on Friday the 14th. If the 15th is a Sunday, the trigger fires on Monday the 16th. If the 15th is a Tuesday, then it fires on Tuesday the 15th. However if you specify `1W` as the value for day-of-month, and the 1st is a Saturday, the trigger fires on Monday the 3rd, as it does not 'jump' over the boundary of a month's days.
+
+The `W` character can be specified only when the day-of-month is a single day, not a range or list of days.
+
+The `W` character can also be combined with `L`, i.e. `LW` to mean "the last business day of the month."
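+
+For instance (an illustrative sketch, not from the upstream README; assume September 2017, where the 1st is a Friday and the 30th is a Saturday):
+
+    0 0 15W * *    midnight on Friday the 15th (the 15th is already a business day)
+    0 0 LW * *     midnight on Friday the 29th (the last business day of the month)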
+
+#### Hash ( # )
+`#` is allowed for the day-of-week field, and must be followed by a number between one and five. It allows you to specify constructs such as "the second Friday" of a given month.
+
+Predefined cron expressions
+---------------------------
+(Copied from the same Wikipedia article, with text modified according to this implementation)
+
+    Entry       Description                                                             Equivalent to
+    @annually   Run once a year at midnight in the morning of January 1                 0 0 0 1 1 * *
+    @yearly     Run once a year at midnight in the morning of January 1                 0 0 0 1 1 * *
+    @monthly    Run once a month at midnight in the morning of the first of the month   0 0 0 1 * * *
+    @weekly     Run once a week at midnight in the morning of Sunday                    0 0 0 * * 0 *
+    @daily      Run once a day at midnight                                              0 0 0 * * * *
+    @hourly     Run once an hour at the beginning of the hour                           0 0 * * * * *
+    @reboot     Not supported
+
+Other details
+-------------
+* If only six fields are present, a `0` second field is prepended, that is, `* * * * * 2013` internally becomes `0 * * * * * 2013`.
+* If only five fields are present, a `0` second field is prepended and a wildcard year field is appended, that is, `* * * * Mon` internally becomes `0 * * * * Mon *`.
+* The domain for the day-of-week field is [0-7] instead of [0-6], 7 being Sunday (like 0). This is to comply with http://linux.die.net/man/5/crontab#.
+* As of now, the behavior of the code is undefined if a malformed cron expression is supplied.
+
+Install
+-------
+    go get github.com/gorhill/cronexpr
+
+Usage
+-----
+Import the library:
+
+    import "github.com/gorhill/cronexpr"
+    import "time"
+
+Simplest way:
+
+    nextTime := cronexpr.MustParse("0 0 29 2 *").Next(time.Now())
+
+Assuming `time.Now()` is "2013-08-29 09:28:00", then `nextTime` will be "2016-02-29 00:00:00".
+
+You can keep the returned Expression pointer around if you want to reuse it:
+
+    expr := cronexpr.MustParse("0 0 29 2 *")
+    nextTime := expr.Next(time.Now())
+    ...
+    nextTime = expr.Next(nextTime)
+
+Use `time.IsZero()` to find out whether a valid time was returned. For example,
+
+    cronexpr.MustParse("* * * * * 1980").Next(time.Now()).IsZero()
+
+will return `true`, whereas
+
+    cronexpr.MustParse("* * * * * 2050").Next(time.Now()).IsZero()
+
+will return `false` (as of 2013-08-29...).
+
+You may also query for `n` next time stamps:
+
+    cronexpr.MustParse("0 0 29 2 *").NextN(time.Now(), 5)
+
+which returns a slice of time.Time objects, containing the following time stamps (as of 2013-08-30):
+
+    2016-02-29 00:00:00
+    2020-02-29 00:00:00
+    2024-02-29 00:00:00
+    2028-02-29 00:00:00
+    2032-02-29 00:00:00
+
+The time zone of time values returned by `Next` and `NextN` is always the
+time zone of the time value passed as argument, unless a zero time value is
+returned.
+
+API
+---
+See the godoc documentation for the package API.
+
+License
+-------
+
+License: pick the one which suits you best:
+
+- GPL v3
+- APL v2
+
diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr.go b/vendor/github.com/gorhill/cronexpr/cronexpr.go
new file mode 100644
index 000000000..58b518fa5
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/cronexpr.go
@@ -0,0 +1,266 @@
+/*!
+ * Copyright 2013 Raymond Hill
+ *
+ * Project: github.com/gorhill/cronexpr
+ * File: cronexpr.go
+ * Version: 1.0
+ * License: pick the one which suits you best:
+ *          GPL v3
+ *          APL v2
+ *
+ */
+
+// Package cronexpr parses cron time expressions.
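+//
+// A minimal usage sketch (the expression below is illustrative, not part of
+// the API):
+//
+//	expr, err := cronexpr.Parse("0 30 8 * * mon-fri *")
+//	if err != nil {
+//		// handle the malformed expression
+//	}
+//	nextRun := expr.Next(time.Now())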
+package cronexpr
+
+/******************************************************************************/
+
+import (
+    "fmt"
+    "sort"
+    "time"
+)
+
+/******************************************************************************/
+
+// An Expression represents a specific cron time expression as defined in the
+// package README.
+type Expression struct {
+    expression             string
+    secondList             []int
+    minuteList             []int
+    hourList               []int
+    daysOfMonth            map[int]bool
+    workdaysOfMonth        map[int]bool
+    lastDayOfMonth         bool
+    lastWorkdayOfMonth     bool
+    daysOfMonthRestricted  bool
+    actualDaysOfMonthList  []int
+    monthList              []int
+    daysOfWeek             map[int]bool
+    specificWeekDaysOfWeek map[int]bool
+    lastWeekDaysOfWeek     map[int]bool
+    daysOfWeekRestricted   bool
+    yearList               []int
+}
+
+/******************************************************************************/
+
+// MustParse returns a new Expression pointer. It expects a well-formed cron
+// expression. If a malformed cron expression is supplied, it will `panic`.
+// See the package README for documentation about what constitutes a
+// well-formed cron expression from this library's point of view.
+func MustParse(cronLine string) *Expression {
+    expr, err := Parse(cronLine)
+    if err != nil {
+        panic(err)
+    }
+    return expr
+}
+
+/******************************************************************************/
+
+// Parse returns a new Expression pointer. An error is returned if a malformed
+// cron expression is supplied.
+// See the package README for documentation about what constitutes a
+// well-formed cron expression from this library's point of view.
+func Parse(cronLine string) (*Expression, error) {
+
+    // Maybe one of the built-in aliases is being used
+    cron := cronNormalizer.Replace(cronLine)
+
+    indices := fieldFinder.FindAllStringIndex(cron, -1)
+    fieldCount := len(indices)
+    if fieldCount < 5 {
+        return nil, fmt.Errorf("missing field(s)")
+    }
+    // ignore fields beyond 7th
+    if fieldCount > 7 {
+        fieldCount = 7
+    }
+
+    var expr = Expression{}
+    var field = 0
+    var err error
+
+    // second field (optional)
+    if fieldCount == 7 {
+        err = expr.secondFieldHandler(cron[indices[field][0]:indices[field][1]])
+        if err != nil {
+            return nil, err
+        }
+        field += 1
+    } else {
+        expr.secondList = []int{0}
+    }
+
+    // minute field
+    err = expr.minuteFieldHandler(cron[indices[field][0]:indices[field][1]])
+    if err != nil {
+        return nil, err
+    }
+    field += 1
+
+    // hour field
+    err = expr.hourFieldHandler(cron[indices[field][0]:indices[field][1]])
+    if err != nil {
+        return nil, err
+    }
+    field += 1
+
+    // day of month field
+    err = expr.domFieldHandler(cron[indices[field][0]:indices[field][1]])
+    if err != nil {
+        return nil, err
+    }
+    field += 1
+
+    // month field
+    err = expr.monthFieldHandler(cron[indices[field][0]:indices[field][1]])
+    if err != nil {
+        return nil, err
+    }
+    field += 1
+
+    // day of week field
+    err = expr.dowFieldHandler(cron[indices[field][0]:indices[field][1]])
+    if err != nil {
+        return nil, err
+    }
+    field += 1
+
+    // year field
+    if field < fieldCount {
+        err = expr.yearFieldHandler(cron[indices[field][0]:indices[field][1]])
+        if err != nil {
+            return nil, err
+        }
+    } else {
+        expr.yearList = yearDescriptor.defaultList
+    }
+
+    return &expr, nil
+}
+
+/******************************************************************************/
+
+// Next returns the closest time instant immediately following `fromTime` which
+// matches the cron expression `expr`.
+//
+// The `time.Location` of the returned time instant is the same as that of
+// `fromTime`.
+//
+// The zero value of time.Time is returned if no matching time instant exists
+// or if `fromTime` is itself a zero value.
+func (expr *Expression) Next(fromTime time.Time) time.Time {
+    // Special case
+    if fromTime.IsZero() {
+        return fromTime
+    }
+
+    // Since expr.nextSecond() through expr.nextMonth() expect that the
+    // supplied time stamp is a perfect match to the underlying cron
+    // expression, and since this function is an entry point where `fromTime`
+    // does not necessarily match the underlying cron expression, we first
+    // need to ensure the supplied time stamp matches the cron expression. If
+    // it does not, the supplied time stamp falls between matching time
+    // stamps, so we move to the closest future match as soon as a mismatching
+    // field is encountered.
+
+    // year
+    v := fromTime.Year()
+    i := sort.SearchInts(expr.yearList, v)
+    if i == len(expr.yearList) {
+        return time.Time{}
+    }
+    if v != expr.yearList[i] {
+        return expr.nextYear(fromTime)
+    }
+    // month
+    v = int(fromTime.Month())
+    i = sort.SearchInts(expr.monthList, v)
+    if i == len(expr.monthList) {
+        return expr.nextYear(fromTime)
+    }
+    if v != expr.monthList[i] {
+        return expr.nextMonth(fromTime)
+    }
+
+    expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(fromTime.Year(), int(fromTime.Month()))
+    if len(expr.actualDaysOfMonthList) == 0 {
+        return expr.nextMonth(fromTime)
+    }
+
+    // day of month
+    v = fromTime.Day()
+    i = sort.SearchInts(expr.actualDaysOfMonthList, v)
+    if i == len(expr.actualDaysOfMonthList) {
+        return expr.nextMonth(fromTime)
+    }
+    if v != expr.actualDaysOfMonthList[i] {
+        return expr.nextDayOfMonth(fromTime)
+    }
+    // hour
+    v = fromTime.Hour()
+    i = sort.SearchInts(expr.hourList, v)
+    if i == len(expr.hourList) {
+        return expr.nextDayOfMonth(fromTime)
+    }
+    if v != expr.hourList[i] {
+        return expr.nextHour(fromTime)
+    }
+    // minute
+    v = fromTime.Minute()
+    i = sort.SearchInts(expr.minuteList, v)
+    if i == len(expr.minuteList) {
+        return expr.nextHour(fromTime)
+    }
+    if v != expr.minuteList[i] {
+        return expr.nextMinute(fromTime)
+    }
+    // second
+    v = fromTime.Second()
+    i = sort.SearchInts(expr.secondList, v)
+    if i == len(expr.secondList) {
+        return expr.nextMinute(fromTime)
+    }
+
+    // If we reach this point, there is nothing better to do
+    // than to move to the next second
+
+    return expr.nextSecond(fromTime)
+}
+
+/******************************************************************************/
+
+// NextN returns a slice of `n` closest time instants immediately following
+// `fromTime` which match the cron expression `expr`.
+//
+// The time instants in the returned slice are in chronological ascending order.
+// The `time.Location` of the returned time instants is the same as that of
+// `fromTime`.
+//
+// A slice with a length between 0 and `n` is returned; that is, if not enough
+// matching time instants exist, the number of returned entries will be less
+// than `n`.
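+//
+// A usage sketch, mirroring the README example (results depend on the moment
+// of the call):
+//
+//	leapDays := cronexpr.MustParse("0 0 29 2 *").NextN(time.Now(), 5)
+//	// leapDays holds the next five February 29ths, in ascending order.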
+func (expr *Expression) NextN(fromTime time.Time, n uint) []time.Time { + nextTimes := make([]time.Time, 0, n) + if n > 0 { + fromTime = expr.Next(fromTime) + for { + if fromTime.IsZero() { + break + } + nextTimes = append(nextTimes, fromTime) + n -= 1 + if n == 0 { + break + } + fromTime = expr.nextSecond(fromTime) + } + } + return nextTimes +} diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_next.go b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go new file mode 100644 index 000000000..a0ebdb6b2 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go @@ -0,0 +1,292 @@ +/*! + * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr_next.go + * Version: 1.0 + * License: pick the one which suits you : + * GPL v3 see + * APL v2 see + * + */ + +package cronexpr + +/******************************************************************************/ + +import ( + "sort" + "time" +) + +/******************************************************************************/ + +var dowNormalizedOffsets = [][]int{ + {1, 8, 15, 22, 29}, + {2, 9, 16, 23, 30}, + {3, 10, 17, 24, 31}, + {4, 11, 18, 25}, + {5, 12, 19, 26}, + {6, 13, 20, 27}, + {7, 14, 21, 28}, +} + +/******************************************************************************/ + +func (expr *Expression) nextYear(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate year + i := sort.SearchInts(expr.yearList, t.Year()+1) + if i == len(expr.yearList) { + return time.Time{} + } + // Year changed, need to recalculate actual days of month + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(expr.yearList[i], expr.monthList[0]) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(time.Date( + expr.yearList[i], + time.Month(expr.monthList[0]), + 1, + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location())) + } + return time.Date( + expr.yearList[i], + time.Month(expr.monthList[0]), + expr.actualDaysOfMonthList[0], + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextMonth(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate month + i := sort.SearchInts(expr.monthList, int(t.Month())+1) + if i == len(expr.monthList) { + return expr.nextYear(t) + } + // Month changed, need to recalculate actual days of month + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(t.Year(), expr.monthList[i]) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(time.Date( + t.Year(), + time.Month(expr.monthList[i]), + 1, + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location())) + } + + return time.Date( + t.Year(), + time.Month(expr.monthList[i]), + expr.actualDaysOfMonthList[0], + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextDayOfMonth(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate day of month + i := sort.SearchInts(expr.actualDaysOfMonthList, t.Day()+1) + if i == len(expr.actualDaysOfMonthList) { + return expr.nextMonth(t) + } + + return time.Date( + t.Year(), + t.Month(), + expr.actualDaysOfMonthList[i], + expr.hourList[0], + expr.minuteList[0], + 
expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextHour(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate hour + i := sort.SearchInts(expr.hourList, t.Hour()+1) + if i == len(expr.hourList) { + return expr.nextDayOfMonth(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + expr.hourList[i], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextMinute(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate minute + i := sort.SearchInts(expr.minuteList, t.Minute()+1) + if i == len(expr.minuteList) { + return expr.nextHour(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + expr.minuteList[i], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextSecond(t time.Time) time.Time { + // nextSecond() assumes all other fields are exactly matched + // to the cron expression + + // Find index at which item in list is greater or equal to + // candidate second + i := sort.SearchInts(expr.secondList, t.Second()+1) + if i == len(expr.secondList) { + return expr.nextMinute(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + expr.secondList[i], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) calculateActualDaysOfMonth(year, month int) []int { + actualDaysOfMonthMap := make(map[int]bool) + firstDayOfMonth := time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC) + lastDayOfMonth := firstDayOfMonth.AddDate(0, 1, -1) + + // As per crontab man page (http://linux.die.net/man/5/crontab#): + // "The day of a command's execution can be specified by two + // "fields - day of month, and day of week. If both fields are + // "restricted (ie, aren't *), the command will be run when + // "either field matches the current time" + + // If both fields are not restricted, all days of the month are a hit + if expr.daysOfMonthRestricted == false && expr.daysOfWeekRestricted == false { + return genericDefaultList[1 : lastDayOfMonth.Day()+1] + } + + // day-of-month != `*` + if expr.daysOfMonthRestricted { + // Last day of month + if expr.lastDayOfMonth { + actualDaysOfMonthMap[lastDayOfMonth.Day()] = true + } + // Last work day of month + if expr.lastWorkdayOfMonth { + actualDaysOfMonthMap[workdayOfMonth(lastDayOfMonth, lastDayOfMonth)] = true + } + // Days of month + for v := range expr.daysOfMonth { + // Ignore days beyond end of month + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + // Work days of month + // As per Wikipedia: month boundaries are not crossed. 
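+			// For example, `1W` in a month whose 1st falls on a Saturday
+			// resolves to Monday the 3rd, never to a day of the previous month.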
+ for v := range expr.workdaysOfMonth { + // Ignore days beyond end of month + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[workdayOfMonth(firstDayOfMonth.AddDate(0, 0, v-1), lastDayOfMonth)] = true + } + } + } + + // day-of-week != `*` + if expr.daysOfWeekRestricted { + // How far first sunday is from first day of month + offset := 7 - int(firstDayOfMonth.Weekday()) + // days of week + // offset : (7 - day_of_week_of_1st_day_of_month) + // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7 + for v := range expr.daysOfWeek { + w := dowNormalizedOffsets[(offset+v)%7] + actualDaysOfMonthMap[w[0]] = true + actualDaysOfMonthMap[w[1]] = true + actualDaysOfMonthMap[w[2]] = true + actualDaysOfMonthMap[w[3]] = true + if len(w) > 4 && w[4] <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[w[4]] = true + } + } + // days of week of specific week in the month + // offset : (7 - day_of_week_of_1st_day_of_month) + // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7 + for v := range expr.specificWeekDaysOfWeek { + v = 1 + 7*(v/7) + (offset+v)%7 + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + // Last days of week of the month + lastWeekOrigin := firstDayOfMonth.AddDate(0, 1, -7) + offset = 7 - int(lastWeekOrigin.Weekday()) + for v := range expr.lastWeekDaysOfWeek { + v = lastWeekOrigin.Day() + (offset+v)%7 + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + } + + return toList(actualDaysOfMonthMap) +} + +func workdayOfMonth(targetDom, lastDom time.Time) int { + // If saturday, then friday + // If sunday, then monday + dom := targetDom.Day() + dow := targetDom.Weekday() + if dow == time.Saturday { + if dom > 1 { + dom -= 1 + } else { + dom += 2 + } + } else if dow == time.Sunday { + if dom < lastDom.Day() { + dom += 1 + } else { + dom -= 2 + } + } + return dom +} diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go new file mode 100644 index 000000000..be6ef1769 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go @@ -0,0 +1,498 @@ +/*! 
+ * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr_parse.go + * Version: 1.0 + * License: pick the one which suits you best: + * GPL v3 see + * APL v2 see + * + */ + +package cronexpr + +/******************************************************************************/ + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +/******************************************************************************/ + +var ( + genericDefaultList = []int{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + } + yearDefaultList = []int{ + 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, + 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, + 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, + 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, + 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, + 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, + 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, + 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, + 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, + 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, + } +) + +/******************************************************************************/ + +var ( + numberTokens = map[string]int{ + "0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, + "00": 0, "01": 1, "02": 2, "03": 3, "04": 4, "05": 5, "06": 6, "07": 7, "08": 8, "09": 9, + "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19, + "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29, + "30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39, + "40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49, + "50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59, + "1970": 1970, "1971": 1971, "1972": 1972, "1973": 1973, "1974": 1974, "1975": 1975, "1976": 1976, "1977": 1977, "1978": 1978, "1979": 1979, + "1980": 1980, "1981": 1981, "1982": 1982, "1983": 1983, "1984": 1984, "1985": 1985, "1986": 1986, "1987": 1987, "1988": 1988, "1989": 1989, + "1990": 1990, "1991": 1991, "1992": 1992, "1993": 1993, "1994": 1994, "1995": 1995, "1996": 1996, "1997": 1997, "1998": 1998, "1999": 1999, + "2000": 2000, "2001": 2001, "2002": 2002, "2003": 2003, "2004": 2004, "2005": 2005, "2006": 2006, "2007": 2007, "2008": 2008, "2009": 2009, + "2010": 2010, "2011": 2011, "2012": 2012, "2013": 2013, "2014": 2014, "2015": 2015, "2016": 2016, "2017": 2017, "2018": 2018, "2019": 2019, + "2020": 2020, "2021": 2021, "2022": 2022, "2023": 2023, "2024": 2024, "2025": 2025, "2026": 2026, "2027": 2027, "2028": 2028, "2029": 2029, + "2030": 2030, "2031": 2031, "2032": 2032, "2033": 2033, "2034": 2034, "2035": 2035, "2036": 2036, "2037": 2037, "2038": 2038, "2039": 2039, + "2040": 2040, "2041": 2041, "2042": 2042, "2043": 2043, "2044": 2044, "2045": 2045, "2046": 2046, "2047": 2047, "2048": 2048, "2049": 2049, + "2050": 2050, "2051": 2051, 
"2052": 2052, "2053": 2053, "2054": 2054, "2055": 2055, "2056": 2056, "2057": 2057, "2058": 2058, "2059": 2059, + "2060": 2060, "2061": 2061, "2062": 2062, "2063": 2063, "2064": 2064, "2065": 2065, "2066": 2066, "2067": 2067, "2068": 2068, "2069": 2069, + "2070": 2070, "2071": 2071, "2072": 2072, "2073": 2073, "2074": 2074, "2075": 2075, "2076": 2076, "2077": 2077, "2078": 2078, "2079": 2079, + "2080": 2080, "2081": 2081, "2082": 2082, "2083": 2083, "2084": 2084, "2085": 2085, "2086": 2086, "2087": 2087, "2088": 2088, "2089": 2089, + "2090": 2090, "2091": 2091, "2092": 2092, "2093": 2093, "2094": 2094, "2095": 2095, "2096": 2096, "2097": 2097, "2098": 2098, "2099": 2099, + } + monthTokens = map[string]int{ + `1`: 1, `jan`: 1, `january`: 1, + `2`: 2, `feb`: 2, `february`: 2, + `3`: 3, `mar`: 3, `march`: 3, + `4`: 4, `apr`: 4, `april`: 4, + `5`: 5, `may`: 5, + `6`: 6, `jun`: 6, `june`: 6, + `7`: 7, `jul`: 7, `july`: 7, + `8`: 8, `aug`: 8, `august`: 8, + `9`: 9, `sep`: 9, `september`: 9, + `10`: 10, `oct`: 10, `october`: 10, + `11`: 11, `nov`: 11, `november`: 11, + `12`: 12, `dec`: 12, `december`: 12, + } + dowTokens = map[string]int{ + `0`: 0, `sun`: 0, `sunday`: 0, + `1`: 1, `mon`: 1, `monday`: 1, + `2`: 2, `tue`: 2, `tuesday`: 2, + `3`: 3, `wed`: 3, `wednesday`: 3, + `4`: 4, `thu`: 4, `thursday`: 4, + `5`: 5, `fri`: 5, `friday`: 5, + `6`: 6, `sat`: 6, `saturday`: 6, + `7`: 0, + } +) + +/******************************************************************************/ + +func atoi(s string) int { + return numberTokens[s] +} + +type fieldDescriptor struct { + name string + min, max int + defaultList []int + valuePattern string + atoi func(string) int +} + +var ( + secondDescriptor = fieldDescriptor{ + name: "second", + min: 0, + max: 59, + defaultList: genericDefaultList[0:60], + valuePattern: `0?[0-9]|[1-5][0-9]`, + atoi: atoi, + } + minuteDescriptor = fieldDescriptor{ + name: "minute", + min: 0, + max: 59, + defaultList: genericDefaultList[0:60], + valuePattern: `0?[0-9]|[1-5][0-9]`, + atoi: atoi, + } + hourDescriptor = fieldDescriptor{ + name: "hour", + min: 0, + max: 23, + defaultList: genericDefaultList[0:24], + valuePattern: `0?[0-9]|1[0-9]|2[0-3]`, + atoi: atoi, + } + domDescriptor = fieldDescriptor{ + name: "day-of-month", + min: 1, + max: 31, + defaultList: genericDefaultList[1:32], + valuePattern: `0?[1-9]|[12][0-9]|3[01]`, + atoi: atoi, + } + monthDescriptor = fieldDescriptor{ + name: "month", + min: 1, + max: 12, + defaultList: genericDefaultList[1:13], + valuePattern: `0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|january|february|march|april|march|april|june|july|august|september|october|november|december`, + atoi: func(s string) int { + return monthTokens[s] + }, + } + dowDescriptor = fieldDescriptor{ + name: "day-of-week", + min: 0, + max: 6, + defaultList: genericDefaultList[0:7], + valuePattern: `0?[0-7]|sun|mon|tue|wed|thu|fri|sat|sunday|monday|tuesday|wednesday|thursday|friday|saturday`, + atoi: func(s string) int { + return dowTokens[s] + }, + } + yearDescriptor = fieldDescriptor{ + name: "year", + min: 1970, + max: 2099, + defaultList: yearDefaultList[:], + valuePattern: `19[789][0-9]|20[0-9]{2}`, + atoi: atoi, + } +) + +/******************************************************************************/ + +var ( + layoutWildcard = `^\*$|^\?$` + layoutValue = `^(%value%)$` + layoutRange = `^(%value%)-(%value%)$` + layoutWildcardAndInterval = `^\*/(\d+)$` + layoutValueAndInterval = `^(%value%)/(\d+)$` + layoutRangeAndInterval = `^(%value%)-(%value%)/(\d+)$` + 
layoutLastDom = `^l$` + layoutWorkdom = `^(%value%)w$` + layoutLastWorkdom = `^lw$` + layoutDowOfLastWeek = `^(%value%)l$` + layoutDowOfSpecificWeek = `^(%value%)#([1-5])$` + fieldFinder = regexp.MustCompile(`\S+`) + entryFinder = regexp.MustCompile(`[^,]+`) + layoutRegexp = make(map[string]*regexp.Regexp) +) + +/******************************************************************************/ + +var cronNormalizer = strings.NewReplacer( + "@yearly", "0 0 0 1 1 * *", + "@annually", "0 0 0 1 1 * *", + "@monthly", "0 0 0 1 * * *", + "@weekly", "0 0 0 * * 0 *", + "@daily", "0 0 0 * * * *", + "@hourly", "0 0 * * * * *") + +/******************************************************************************/ + +func (expr *Expression) secondFieldHandler(s string) error { + var err error + expr.secondList, err = genericFieldHandler(s, secondDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) minuteFieldHandler(s string) error { + var err error + expr.minuteList, err = genericFieldHandler(s, minuteDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) hourFieldHandler(s string) error { + var err error + expr.hourList, err = genericFieldHandler(s, hourDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) monthFieldHandler(s string) error { + var err error + expr.monthList, err = genericFieldHandler(s, monthDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) yearFieldHandler(s string) error { + var err error + expr.yearList, err = genericFieldHandler(s, yearDescriptor) + return err +} + +/******************************************************************************/ + +const ( + none = 0 + one = 1 + span = 2 + all = 3 +) + +type cronDirective struct { + kind int + first int + last int + step int + sbeg int + send int +} + +func genericFieldHandler(s string, desc fieldDescriptor) ([]int, error) { + directives, err := genericFieldParse(s, desc) + if err != nil { + return nil, err + } + values := make(map[int]bool) + for _, directive := range directives { + switch directive.kind { + case none: + return nil, fmt.Errorf("syntax error in %s field: '%s'", desc.name, s[directive.sbeg:directive.send]) + case one: + populateOne(values, directive.first) + case span: + populateMany(values, directive.first, directive.last, directive.step) + case all: + return desc.defaultList, nil + } + } + return toList(values), nil +} + +func (expr *Expression) dowFieldHandler(s string) error { + expr.daysOfWeekRestricted = true + expr.daysOfWeek = make(map[int]bool) + expr.lastWeekDaysOfWeek = make(map[int]bool) + expr.specificWeekDaysOfWeek = make(map[int]bool) + + directives, err := genericFieldParse(s, dowDescriptor) + if err != nil { + return err + } + + for _, directive := range directives { + switch directive.kind { + case none: + sdirective := s[directive.sbeg:directive.send] + snormal := strings.ToLower(sdirective) + // `5L` + pairs := makeLayoutRegexp(layoutDowOfLastWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.lastWeekDaysOfWeek, dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])) + } else { + // `5#3` + pairs := makeLayoutRegexp(layoutDowOfSpecificWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if 
len(pairs) > 0 { + populateOne(expr.specificWeekDaysOfWeek, (dowDescriptor.atoi(snormal[pairs[4]:pairs[5]])-1)*7+(dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])%7)) + } else { + return fmt.Errorf("syntax error in day-of-week field: '%s'", sdirective) + } + } + case one: + populateOne(expr.daysOfWeek, directive.first) + case span: + populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step) + case all: + populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step) + expr.daysOfWeekRestricted = false + } + } + return nil +} + +func (expr *Expression) domFieldHandler(s string) error { + expr.daysOfMonthRestricted = true + expr.lastDayOfMonth = false + expr.lastWorkdayOfMonth = false + expr.daysOfMonth = make(map[int]bool) // days of month map + expr.workdaysOfMonth = make(map[int]bool) // work days of month map + + directives, err := genericFieldParse(s, domDescriptor) + if err != nil { + return err + } + + for _, directive := range directives { + switch directive.kind { + case none: + sdirective := s[directive.sbeg:directive.send] + snormal := strings.ToLower(sdirective) + // `L` + if makeLayoutRegexp(layoutLastDom, domDescriptor.valuePattern).MatchString(snormal) { + expr.lastDayOfMonth = true + } else { + // `LW` + if makeLayoutRegexp(layoutLastWorkdom, domDescriptor.valuePattern).MatchString(snormal) { + expr.lastWorkdayOfMonth = true + } else { + // `15W` + pairs := makeLayoutRegexp(layoutWorkdom, domDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.workdaysOfMonth, domDescriptor.atoi(snormal[pairs[2]:pairs[3]])) + } else { + return fmt.Errorf("syntax error in day-of-month field: '%s'", sdirective) + } + } + } + case one: + populateOne(expr.daysOfMonth, directive.first) + case span: + populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step) + case all: + populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step) + expr.daysOfMonthRestricted = false + } + } + return nil +} + +/******************************************************************************/ + +func populateOne(values map[int]bool, v int) { + values[v] = true +} + +func populateMany(values map[int]bool, min, max, step int) { + for i := min; i <= max; i += step { + values[i] = true + } +} + +func toList(set map[int]bool) []int { + list := make([]int, len(set)) + i := 0 + for k := range set { + list[i] = k + i += 1 + } + sort.Ints(list) + return list +} + +/******************************************************************************/ + +func genericFieldParse(s string, desc fieldDescriptor) ([]*cronDirective, error) { + // At least one entry must be present + indices := entryFinder.FindAllStringIndex(s, -1) + if len(indices) == 0 { + return nil, fmt.Errorf("%s field: missing directive", desc.name) + } + + directives := make([]*cronDirective, 0, len(indices)) + + for i := range indices { + directive := cronDirective{ + sbeg: indices[i][0], + send: indices[i][1], + } + snormal := strings.ToLower(s[indices[i][0]:indices[i][1]]) + + // `*` + if makeLayoutRegexp(layoutWildcard, desc.valuePattern).MatchString(snormal) { + directive.kind = all + directive.first = desc.min + directive.last = desc.max + directive.step = 1 + directives = append(directives, &directive) + continue + } + // `5` + if makeLayoutRegexp(layoutValue, desc.valuePattern).MatchString(snormal) { + directive.kind = one + directive.first = desc.atoi(snormal) + directives = append(directives, &directive) + continue + } + // `5-20` + 
pairs := makeLayoutRegexp(layoutRange, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.atoi(snormal[pairs[4]:pairs[5]]) + directive.step = 1 + directives = append(directives, &directive) + continue + } + // `*/2` + pairs = makeLayoutRegexp(layoutWildcardAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.min + directive.last = desc.max + directive.step = atoi(snormal[pairs[2]:pairs[3]]) + if directive.step < 1 || directive.step > desc.max { + return nil, fmt.Errorf("invalid interval %s", snormal) + } + directives = append(directives, &directive) + continue + } + // `5/2` + pairs = makeLayoutRegexp(layoutValueAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.max + directive.step = atoi(snormal[pairs[4]:pairs[5]]) + if directive.step < 1 || directive.step > desc.max { + return nil, fmt.Errorf("invalid interval %s", snormal) + } + directives = append(directives, &directive) + continue + } + // `5-20/2` + pairs = makeLayoutRegexp(layoutRangeAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.atoi(snormal[pairs[4]:pairs[5]]) + directive.step = atoi(snormal[pairs[6]:pairs[7]]) + if directive.step < 1 || directive.step > desc.max { + return nil, fmt.Errorf("invalid interval %s", snormal) + } + directives = append(directives, &directive) + continue + } + // No behavior for this one, let caller deal with it + directive.kind = none + directives = append(directives, &directive) + } + return directives, nil +} + +/******************************************************************************/ + +func makeLayoutRegexp(layout, value string) *regexp.Regexp { + layout = strings.Replace(layout, `%value%`, value, -1) + re := layoutRegexp[layout] + if re == nil { + re = regexp.MustCompile(layout) + layoutRegexp[layout] = re + } + return re +} diff --git a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go new file mode 100644 index 000000000..f3e6e8ca4 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go @@ -0,0 +1,14 @@ +package contexts + +// Context defines the scope in which a search for Nomad object operates +type Context string + +const ( + Allocs Context = "allocs" + Deployments Context = "deployment" + Evals Context = "evals" + Jobs Context = "jobs" + Nodes Context = "nodes" + Namespaces Context = "namespaces" + All Context = "all" +) diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go new file mode 100644 index 000000000..0b0796059 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -0,0 +1,272 @@ +package helper + +import ( + "crypto/sha512" + "fmt" + "regexp" + "time" +) + +// validUUID is used to check if a given string looks like a UUID +var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`) + +// IsUUID returns true if the given string is a valid UUID. 
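+// For example, "11111111-2222-3333-4444-555555555555" is accepted, while any
+// string of a different shape is rejected.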
+func IsUUID(str string) bool {
+    const uuidLen = 36
+    if len(str) != uuidLen {
+        return false
+    }
+
+    return validUUID.MatchString(str)
+}
+
+// HashUUID takes an input UUID and returns a hashed version of the UUID to
+// ensure it is well distributed.
+func HashUUID(input string) (output string, hashed bool) {
+    if !IsUUID(input) {
+        return "", false
+    }
+
+    // Hash the input
+    buf := sha512.Sum512([]byte(input))
+    output = fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+        buf[0:4],
+        buf[4:6],
+        buf[6:8],
+        buf[8:10],
+        buf[10:16])
+
+    return output, true
+}
+
+// BoolToPtr returns the pointer to a boolean
+func BoolToPtr(b bool) *bool {
+    return &b
+}
+
+// IntToPtr returns the pointer to an int
+func IntToPtr(i int) *int {
+    return &i
+}
+
+// Int64ToPtr returns the pointer to an int64
+func Int64ToPtr(i int64) *int64 {
+    return &i
+}
+
+// Uint64ToPtr returns the pointer to a uint64
+func Uint64ToPtr(u uint64) *uint64 {
+    return &u
+}
+
+// StringToPtr returns the pointer to a string
+func StringToPtr(str string) *string {
+    return &str
+}
+
+// TimeToPtr returns the pointer to a time.Duration
+func TimeToPtr(t time.Duration) *time.Duration {
+    return &t
+}
+
+// IntMin returns the smaller of two ints
+func IntMin(a, b int) int {
+    if a < b {
+        return a
+    }
+    return b
+}
+
+// IntMax returns the larger of two ints
+func IntMax(a, b int) int {
+    if a > b {
+        return a
+    }
+    return b
+}
+
+// Uint64Max returns the larger of two uint64s
+func Uint64Max(a, b uint64) uint64 {
+    if a > b {
+        return a
+    }
+    return b
+}
+
+// MapStringStringSliceValueSet returns the set of values in a map[string][]string
+func MapStringStringSliceValueSet(m map[string][]string) []string {
+    set := make(map[string]struct{})
+    for _, slice := range m {
+        for _, v := range slice {
+            set[v] = struct{}{}
+        }
+    }
+
+    flat := make([]string, 0, len(set))
+    for k := range set {
+        flat = append(flat, k)
+    }
+    return flat
+}
+
+// SliceStringToSet builds a set from a slice of strings
+func SliceStringToSet(s []string) map[string]struct{} {
+    m := make(map[string]struct{}, (len(s)+1)/2)
+    for _, k := range s {
+        m[k] = struct{}{}
+    }
+    return m
+}
+
+// SliceStringIsSubset returns whether the smaller set of strings is a subset of
+// the larger. If the smaller slice is not a subset, the offending elements are
+// returned.
+func SliceStringIsSubset(larger, smaller []string) (bool, []string) {
+    largerSet := make(map[string]struct{}, len(larger))
+    for _, l := range larger {
+        largerSet[l] = struct{}{}
+    }
+
+    subset := true
+    var offending []string
+    for _, s := range smaller {
+        if _, ok := largerSet[s]; !ok {
+            subset = false
+            offending = append(offending, s)
+        }
+    }
+
+    return subset, offending
+}
+
+// SliceSetDisjoint returns whether the two slices are disjoint; if they are
+// not, the overlapping elements are also returned.
+func SliceSetDisjoint(first, second []string) (bool, []string) {
+    contained := make(map[string]struct{}, len(first))
+    for _, k := range first {
+        contained[k] = struct{}{}
+    }
+
+    offending := make(map[string]struct{})
+    for _, k := range second {
+        if _, ok := contained[k]; ok {
+            offending[k] = struct{}{}
+        }
+    }
+
+    if len(offending) == 0 {
+        return true, nil
+    }
+
+    flattened := make([]string, 0, len(offending))
+    for k := range offending {
+        flattened = append(flattened, k)
+    }
+    return false, flattened
+}
+
+// Helpers for copying generic structures.
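+// Each copy helper below returns nil for an empty input and an independent
+// copy otherwise, so callers can mutate the result without aliasing the source.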
+func CopyMapStringString(m map[string]string) map[string]string { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]string, l) + for k, v := range m { + c[k] = v + } + return c +} + +func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]struct{}, l) + for k, _ := range m { + c[k] = struct{}{} + } + return c +} + +func CopyMapStringInt(m map[string]int) map[string]int { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]int, l) + for k, v := range m { + c[k] = v + } + return c +} + +func CopyMapStringFloat64(m map[string]float64) map[string]float64 { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]float64, l) + for k, v := range m { + c[k] = v + } + return c +} + +// CopyMapStringSliceString copies a map of strings to string slices such as +// http.Header +func CopyMapStringSliceString(m map[string][]string) map[string][]string { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string][]string, l) + for k, v := range m { + c[k] = CopySliceString(v) + } + return c +} + +func CopySliceString(s []string) []string { + l := len(s) + if l == 0 { + return nil + } + + c := make([]string, l) + for i, v := range s { + c[i] = v + } + return c +} + +func CopySliceInt(s []int) []int { + l := len(s) + if l == 0 { + return nil + } + + c := make([]int, l) + for i, v := range s { + c[i] = v + } + return c +} + +// CleanEnvVar replaces all occurrences of illegal characters in an environment +// variable with the specified byte. +func CleanEnvVar(s string, r byte) string { + b := []byte(s) + for i, c := range b { + switch { + case c == '_': + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case i > 0 && c >= '0' && c <= '9': + default: + // Replace! 
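+			// (any byte outside [A-Za-z0-9_], or a digit in the first
+			// position, is rewritten to the replacement byte)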
+ b[i] = r + } + } + return string(b) +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go new file mode 100644 index 000000000..63758a0be --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go @@ -0,0 +1,78 @@ +package structs + +import "fmt" + +// Bitmap is a simple uncompressed bitmap +type Bitmap []byte + +// NewBitmap returns a bitmap with up to size indexes +func NewBitmap(size uint) (Bitmap, error) { + if size == 0 { + return nil, fmt.Errorf("bitmap must be positive size") + } + if size&7 != 0 { + return nil, fmt.Errorf("bitmap must be byte aligned") + } + b := make([]byte, size>>3) + return Bitmap(b), nil +} + +// Copy returns a copy of the Bitmap +func (b Bitmap) Copy() (Bitmap, error) { + if b == nil { + return nil, fmt.Errorf("can't copy nil Bitmap") + } + + raw := make([]byte, len(b)) + copy(raw, b) + return Bitmap(raw), nil +} + +// Size returns the size of the bitmap +func (b Bitmap) Size() uint { + return uint(len(b) << 3) +} + +// Set is used to set the given index of the bitmap +func (b Bitmap) Set(idx uint) { + bucket := idx >> 3 + mask := byte(1 << (idx & 7)) + b[bucket] |= mask +} + +// Unset is used to unset the given index of the bitmap +func (b Bitmap) Unset(idx uint) { + bucket := idx >> 3 + // Mask should be all ones minus the idx position + offset := 1 << (idx & 7) + mask := byte(offset ^ 0xff) + b[bucket] &= mask +} + +// Check is used to check the given index of the bitmap +func (b Bitmap) Check(idx uint) bool { + bucket := idx >> 3 + mask := byte(1 << (idx & 7)) + return (b[bucket] & mask) != 0 +} + +// Clear is used to efficiently clear the bitmap +func (b Bitmap) Clear() { + for i := range b { + b[i] = 0 + } +} + +// IndexesInRange returns the indexes in which the values are either set or unset based +// on the passed parameter in the passed range +func (b Bitmap) IndexesInRange(set bool, from, to uint) []int { + var indexes []int + for i := from; i <= to && i < b.Size(); i++ { + c := b.Check(i) + if c && set || !c && !set { + indexes = append(indexes, int(i)) + } + } + + return indexes +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go new file mode 100644 index 000000000..b8ced9180 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go @@ -0,0 +1,1231 @@ +package structs + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/nomad/helper/flatmap" + "github.com/mitchellh/hashstructure" +) + +// DiffType denotes the type of a diff object. +type DiffType string + +var ( + DiffTypeNone DiffType = "None" + DiffTypeAdded DiffType = "Added" + DiffTypeDeleted DiffType = "Deleted" + DiffTypeEdited DiffType = "Edited" +) + +func (d DiffType) Less(other DiffType) bool { + // Edited > Added > Deleted > None + // But we do a reverse sort + if d == other { + return false + } + + if d == DiffTypeEdited { + return true + } else if other == DiffTypeEdited { + return false + } else if d == DiffTypeAdded { + return true + } else if other == DiffTypeAdded { + return false + } else if d == DiffTypeDeleted { + return true + } else if other == DiffTypeDeleted { + return false + } + + return true +} + +// JobDiff contains the diff of two jobs. 
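+// Fields holds diffs of the job's primitive fields, Objects holds diffs of
+// nested objects such as constraints and the periodic config, and TaskGroups
+// holds the per-task-group diffs.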
+type JobDiff struct { + Type DiffType + ID string + Fields []*FieldDiff + Objects []*ObjectDiff + TaskGroups []*TaskGroupDiff +} + +// Diff returns a diff of two jobs and a potential error if the Jobs are not +// diffable. If contextual diff is enabled, objects within the job will contain +// field information even if unchanged. +func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) { + // COMPAT: Remove "Update" in 0.7.0. Update pushed down to task groups + // in 0.6.0 + diff := &JobDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"ID", "Status", "StatusDescription", "Version", "Stable", "CreateIndex", + "ModifyIndex", "JobModifyIndex", "Update", "SubmitTime"} + + if j == nil && other == nil { + return diff, nil + } else if j == nil { + j = &Job{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + diff.ID = other.ID + } else if other == nil { + other = &Job{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(j, filter, true) + diff.ID = j.ID + } else { + if j.ID != other.ID { + return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID) + } + + oldPrimitiveFlat = flatmap.Flatten(j, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + diff.ID = other.ID + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Datacenters diff + if setDiff := stringSetDiff(j.Datacenters, other.Datacenters, "Datacenters", contextual); setDiff != nil && setDiff.Type != DiffTypeNone { + diff.Objects = append(diff.Objects, setDiff) + } + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(j.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Task groups diff + tgs, err := taskGroupDiffs(j.TaskGroups, other.TaskGroups, contextual) + if err != nil { + return nil, err + } + diff.TaskGroups = tgs + + // Periodic diff + if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil { + diff.Objects = append(diff.Objects, pDiff) + } + + // ParameterizedJob diff + if cDiff := parameterizedJobDiff(j.ParameterizedJob, other.ParameterizedJob, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + + // Check to see if there is a diff. We don't use reflect because we are + // filtering quite a few fields that will change on each diff. + if diff.Type == DiffTypeNone { + for _, fd := range diff.Fields { + if fd.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + if diff.Type == DiffTypeNone { + for _, od := range diff.Objects { + if od.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + if diff.Type == DiffTypeNone { + for _, tg := range diff.TaskGroups { + if tg.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + return diff, nil +} + +func (j *JobDiff) GoString() string { + out := fmt.Sprintf("Job %q (%s):\n", j.ID, j.Type) + + for _, f := range j.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range j.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + for _, tg := range j.TaskGroups { + out += fmt.Sprintf("%#v\n", tg) + } + + return out +} + +// TaskGroupDiff contains the diff of two task groups. 
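+// Updates maps an update description to a count, as rendered by GoString
+// below.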
+type TaskGroupDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Tasks []*TaskDiff + Updates map[string]uint64 +} + +// Diff returns a diff of two task groups. If contextual diff is enabled, +// objects' fields will be stored even if no diff occurred as long as one field +// changed. +func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, error) { + diff := &TaskGroupDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Name"} + + if tg == nil && other == nil { + return diff, nil + } else if tg == nil { + tg = &TaskGroup{} + diff.Type = DiffTypeAdded + diff.Name = other.Name + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &TaskGroup{} + diff.Type = DiffTypeDeleted + diff.Name = tg.Name + oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) + } else { + if !reflect.DeepEqual(tg, other) { + diff.Type = DiffTypeEdited + } + if tg.Name != other.Name { + return nil, fmt.Errorf("can not diff task groups with different names: %q and %q", tg.Name, other.Name) + } + diff.Name = other.Name + oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(tg.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Restart policy diff + rDiff := primitiveObjectDiff(tg.RestartPolicy, other.RestartPolicy, nil, "RestartPolicy", contextual) + if rDiff != nil { + diff.Objects = append(diff.Objects, rDiff) + } + + // EphemeralDisk diff + diskDiff := primitiveObjectDiff(tg.EphemeralDisk, other.EphemeralDisk, nil, "EphemeralDisk", contextual) + if diskDiff != nil { + diff.Objects = append(diff.Objects, diskDiff) + } + + // Update diff + // COMPAT: Remove "Stagger" in 0.7.0. + if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil { + diff.Objects = append(diff.Objects, uDiff) + } + + // Tasks diff + tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual) + if err != nil { + return nil, err + } + diff.Tasks = tasks + + return diff, nil +} + +func (tg *TaskGroupDiff) GoString() string { + out := fmt.Sprintf("Group %q (%s):\n", tg.Name, tg.Type) + + if len(tg.Updates) != 0 { + out += "Updates {\n" + for update, count := range tg.Updates { + out += fmt.Sprintf("%d %s\n", count, update) + } + out += "}\n" + } + + for _, f := range tg.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range tg.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + for _, t := range tg.Tasks { + out += fmt.Sprintf("%#v\n", t) + } + + return out +} + +// TaskGroupDiffs diffs two sets of task groups. If contextual diff is enabled, +// objects' fields will be stored even if no diff occurred as long as one field +// changed. 
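+// Task groups are matched by Name: names present only in `old` yield Deleted
+// diffs, names present only in `new` yield Added diffs, and shared names are
+// diffed pairwise. The result is sorted by group name.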
+func taskGroupDiffs(old, new []*TaskGroup, contextual bool) ([]*TaskGroupDiff, error) { + oldMap := make(map[string]*TaskGroup, len(old)) + newMap := make(map[string]*TaskGroup, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*TaskGroupDiff + for name, oldGroup := range oldMap { + // Diff the same, deleted and edited + diff, err := oldGroup.Diff(newMap[name], contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + + for name, newGroup := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + diff, err := old.Diff(newGroup, contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + } + + sort.Sort(TaskGroupDiffs(diffs)) + return diffs, nil +} + +// For sorting TaskGroupDiffs +type TaskGroupDiffs []*TaskGroupDiff + +func (tg TaskGroupDiffs) Len() int { return len(tg) } +func (tg TaskGroupDiffs) Swap(i, j int) { tg[i], tg[j] = tg[j], tg[i] } +func (tg TaskGroupDiffs) Less(i, j int) bool { return tg[i].Name < tg[j].Name } + +// TaskDiff contains the diff of two Tasks +type TaskDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Annotations []string +} + +// Diff returns a diff of two tasks. If contextual diff is enabled, objects +// within the task will contain field information even if unchanged. +func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { + diff := &TaskDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Name", "Config"} + + if t == nil && other == nil { + return diff, nil + } else if t == nil { + t = &Task{} + diff.Type = DiffTypeAdded + diff.Name = other.Name + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &Task{} + diff.Type = DiffTypeDeleted + diff.Name = t.Name + oldPrimitiveFlat = flatmap.Flatten(t, filter, true) + } else { + if !reflect.DeepEqual(t, other) { + diff.Type = DiffTypeEdited + } + if t.Name != other.Name { + return nil, fmt.Errorf("can not diff tasks with different names: %q and %q", t.Name, other.Name) + } + diff.Name = other.Name + oldPrimitiveFlat = flatmap.Flatten(t, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(t.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) 
+ } + + // Config diff + if cDiff := configDiff(t.Config, other.Config, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + + // Resources diff + if rDiff := t.Resources.Diff(other.Resources, contextual); rDiff != nil { + diff.Objects = append(diff.Objects, rDiff) + } + + // LogConfig diff + lDiff := primitiveObjectDiff(t.LogConfig, other.LogConfig, nil, "LogConfig", contextual) + if lDiff != nil { + diff.Objects = append(diff.Objects, lDiff) + } + + // Dispatch payload diff + dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual) + if dDiff != nil { + diff.Objects = append(diff.Objects, dDiff) + } + + // Artifacts diff + diffs := primitiveObjectSetDiff( + interfaceSlice(t.Artifacts), + interfaceSlice(other.Artifacts), + nil, + "Artifact", + contextual) + if diffs != nil { + diff.Objects = append(diff.Objects, diffs...) + } + + // Services diff + if sDiffs := serviceDiffs(t.Services, other.Services, contextual); sDiffs != nil { + diff.Objects = append(diff.Objects, sDiffs...) + } + + // Vault diff + vDiff := vaultDiff(t.Vault, other.Vault, contextual) + if vDiff != nil { + diff.Objects = append(diff.Objects, vDiff) + } + + // Template diff + tmplDiffs := primitiveObjectSetDiff( + interfaceSlice(t.Templates), + interfaceSlice(other.Templates), + nil, + "Template", + contextual) + if tmplDiffs != nil { + diff.Objects = append(diff.Objects, tmplDiffs...) + } + + return diff, nil +} + +func (t *TaskDiff) GoString() string { + var out string + if len(t.Annotations) == 0 { + out = fmt.Sprintf("Task %q (%s):\n", t.Name, t.Type) + } else { + out = fmt.Sprintf("Task %q (%s) (%s):\n", t.Name, t.Type, strings.Join(t.Annotations, ",")) + } + + for _, f := range t.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range t.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + return out +} + +// taskDiffs diffs a set of tasks. If contextual diff is enabled, unchanged +// fields within objects nested in the tasks will be returned. +func taskDiffs(old, new []*Task, contextual bool) ([]*TaskDiff, error) { + oldMap := make(map[string]*Task, len(old)) + newMap := make(map[string]*Task, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*TaskDiff + for name, oldGroup := range oldMap { + // Diff the same, deleted and edited + diff, err := oldGroup.Diff(newMap[name], contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + + for name, newGroup := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + diff, err := old.Diff(newGroup, contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + } + + sort.Sort(TaskDiffs(diffs)) + return diffs, nil +} + +// For sorting TaskDiffs +type TaskDiffs []*TaskDiff + +func (t TaskDiffs) Len() int { return len(t) } +func (t TaskDiffs) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t TaskDiffs) Less(i, j int) bool { return t[i].Name < t[j].Name } + +// serviceDiff returns the diff of two service objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. 
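+// A nil `old` is reported as Added and a nil `new` as Deleted; two deeply
+// equal services yield a nil diff.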
+func serviceDiff(old, new *Service, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Service"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Service{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &Service{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Checks diffs + if cDiffs := serviceCheckDiffs(old.Checks, new.Checks, contextual); cDiffs != nil { + diff.Objects = append(diff.Objects, cDiffs...) + } + + return diff +} + +// serviceDiffs diffs a set of services. If contextual diff is enabled, unchanged +// fields within objects nested in the tasks will be returned. +func serviceDiffs(old, new []*Service, contextual bool) []*ObjectDiff { + oldMap := make(map[string]*Service, len(old)) + newMap := make(map[string]*Service, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*ObjectDiff + for name, oldService := range oldMap { + // Diff the same, deleted and edited + if diff := serviceDiff(oldService, newMap[name], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for name, newService := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + if diff := serviceDiff(old, newService, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// serviceCheckDiff returns the diff of two service check objects. If contextual +// diff is enabled, all fields will be returned, even if no diff occurred. +func serviceCheckDiff(old, new *ServiceCheck, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Check"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ServiceCheck{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ServiceCheck{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Diff Header + if headerDiff := checkHeaderDiff(old.Header, new.Header, contextual); headerDiff != nil { + diff.Objects = append(diff.Objects, headerDiff) + } + + return diff +} + +// checkHeaderDiff returns the diff of two service check header objects. If +// contextual diff is enabled, all fields will be returned, even if no diff +// occurred. 
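+// Headers are flattened to key/value pairs before being compared, so
+// individual header values show up as field diffs.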
+func checkHeaderDiff(old, new map[string][]string, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Header"} + if reflect.DeepEqual(old, new) { + return nil + } else if len(old) == 0 { + diff.Type = DiffTypeAdded + } else if len(new) == 0 { + diff.Type = DiffTypeDeleted + } else { + diff.Type = DiffTypeEdited + } + oldFlat := flatmap.Flatten(old, nil, false) + newFlat := flatmap.Flatten(new, nil, false) + diff.Fields = fieldDiffs(oldFlat, newFlat, contextual) + return diff +} + +// serviceCheckDiffs diffs a set of service checks. If contextual diff is +// enabled, unchanged fields within objects nested in the tasks will be +// returned. +func serviceCheckDiffs(old, new []*ServiceCheck, contextual bool) []*ObjectDiff { + oldMap := make(map[string]*ServiceCheck, len(old)) + newMap := make(map[string]*ServiceCheck, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*ObjectDiff + for name, oldCheck := range oldMap { + // Diff the same, deleted and edited + if diff := serviceCheckDiff(oldCheck, newMap[name], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for name, newCheck := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + if diff := serviceCheckDiff(old, newCheck, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// vaultDiff returns the diff of two vault objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. +func vaultDiff(old, new *Vault, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Vault"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Vault{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &Vault{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Policies diffs + if setDiff := stringSetDiff(old.Policies, new.Policies, "Policies", contextual); setDiff != nil { + diff.Objects = append(diff.Objects, setDiff) + } + + return diff +} + +// parameterizedJobDiff returns the diff of two parameterized job objects. If +// contextual diff is enabled, all fields will be returned, even if no diff +// occurred. +func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "ParameterizedJob"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ParameterizedJobConfig{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ParameterizedJobConfig{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. 
+ diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Meta diffs + if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil { + diff.Objects = append(diff.Objects, optionalDiff) + } + + if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil { + diff.Objects = append(diff.Objects, requiredDiff) + } + + return diff +} + +// Diff returns a diff of two resource objects. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Resources"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + r = &Resources{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } else if other == nil { + other = &Resources{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Network Resources diff + if nDiffs := networkResourceDiffs(r.Networks, other.Networks, contextual); nDiffs != nil { + diff.Objects = append(diff.Objects, nDiffs...) + } + + return diff +} + +// Diff returns a diff of two network resources. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *NetworkResource) Diff(other *NetworkResource, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Network"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Device", "CIDR", "IP"} + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + r = &NetworkResource{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &NetworkResource{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, filter, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Port diffs + resPorts := portDiffs(r.ReservedPorts, other.ReservedPorts, false, contextual) + dynPorts := portDiffs(r.DynamicPorts, other.DynamicPorts, true, contextual) + if resPorts != nil { + diff.Objects = append(diff.Objects, resPorts...) + } + if dynPorts != nil { + diff.Objects = append(diff.Objects, dynPorts...) + } + + return diff +} + +// networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled, +// non-changed fields will still be returned. 
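+//
+// Set membership is keyed by hashing each NetworkResource (see below), so an
+// edited resource surfaces as one deletion plus one addition. For example
+// (illustrative values):
+//
+//	old := []*NetworkResource{{Device: "eth0", MBits: 100}}
+//	new := []*NetworkResource{{Device: "eth0", MBits: 200}}
+//	diffs := networkResourceDiffs(old, new, false)
+//	// len(diffs) == 2: a Deleted object for the 100 MBit entry and an
+//	// Added object for the 200 MBit entry.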
+func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff { + makeSet := func(objects []*NetworkResource) map[string]*NetworkResource { + objMap := make(map[string]*NetworkResource, len(objects)) + for _, obj := range objects { + hash, err := hashstructure.Hash(obj, nil) + if err != nil { + panic(err) + } + objMap[fmt.Sprintf("%d", hash)] = obj + } + + return objMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, oldV := range oldSet { + if newV, ok := newSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + for k, newV := range newSet { + if oldV, ok := oldSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// portDiffs returns the diff of two sets of ports. The dynamic flag marks the +// set of ports as being Dynamic ports versus Static ports. If contextual diff is enabled, +// non-changed fields will still be returned. +func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { + makeSet := func(ports []Port) map[string]Port { + portMap := make(map[string]Port, len(ports)) + for _, port := range ports { + portMap[port.Label] = port + } + + return portMap + } + + oldPorts := makeSet(old) + newPorts := makeSet(new) + + var filter []string + name := "Static Port" + if dynamic { + filter = []string{"Value"} + name = "Dynamic Port" + } + + var diffs []*ObjectDiff + for portLabel, oldPort := range oldPorts { + // Diff the same, deleted and edited + if newPort, ok := newPorts[portLabel]; ok { + diff := primitiveObjectDiff(oldPort, newPort, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } else { + diff := primitiveObjectDiff(oldPort, nil, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } + } + for label, newPort := range newPorts { + // Diff the added + if _, ok := oldPorts[label]; !ok { + diff := primitiveObjectDiff(nil, newPort, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// configDiff returns the diff of two Task Config objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. +func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Config"} + if reflect.DeepEqual(old, new) { + return nil + } else if len(old) == 0 { + diff.Type = DiffTypeAdded + } else if len(new) == 0 { + diff.Type = DiffTypeDeleted + } else { + diff.Type = DiffTypeEdited + } + + // Diff the primitive fields. + oldPrimitiveFlat := flatmap.Flatten(old, nil, false) + newPrimitiveFlat := flatmap.Flatten(new, nil, false) + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + return diff +} + +// ObjectDiff contains the diff of two generic objects. 
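+//
+// Rendered via GoString, an edited object looks roughly like:
+//
+//	"Resources" (Edited) {
+//	"CPU" (Edited): "500" => "750"
+//	}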
+type ObjectDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff +} + +func (o *ObjectDiff) GoString() string { + out := fmt.Sprintf("\n%q (%s) {\n", o.Name, o.Type) + for _, f := range o.Fields { + out += fmt.Sprintf("%#v\n", f) + } + for _, o := range o.Objects { + out += fmt.Sprintf("%#v\n", o) + } + out += "}" + return out +} + +func (o *ObjectDiff) Less(other *ObjectDiff) bool { + if reflect.DeepEqual(o, other) { + return false + } else if other == nil { + return false + } else if o == nil { + return true + } + + if o.Name != other.Name { + return o.Name < other.Name + } + + if o.Type != other.Type { + return o.Type.Less(other.Type) + } + + if lO, lOther := len(o.Fields), len(other.Fields); lO != lOther { + return lO < lOther + } + + if lO, lOther := len(o.Objects), len(other.Objects); lO != lOther { + return lO < lOther + } + + // Check each field + sort.Sort(FieldDiffs(o.Fields)) + sort.Sort(FieldDiffs(other.Fields)) + + for i, oV := range o.Fields { + if oV.Less(other.Fields[i]) { + return true + } + } + + // Check each object + sort.Sort(ObjectDiffs(o.Objects)) + sort.Sort(ObjectDiffs(other.Objects)) + for i, oV := range o.Objects { + if oV.Less(other.Objects[i]) { + return true + } + } + + return false +} + +// For sorting ObjectDiffs +type ObjectDiffs []*ObjectDiff + +func (o ObjectDiffs) Len() int { return len(o) } +func (o ObjectDiffs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o ObjectDiffs) Less(i, j int) bool { return o[i].Less(o[j]) } + +type FieldDiff struct { + Type DiffType + Name string + Old, New string + Annotations []string +} + +// fieldDiff returns a FieldDiff if old and new are different otherwise, it +// returns nil. If contextual diff is enabled, even non-changed fields will be +// returned. +func fieldDiff(old, new, name string, contextual bool) *FieldDiff { + diff := &FieldDiff{Name: name, Type: DiffTypeNone} + if old == new { + if !contextual { + return nil + } + diff.Old, diff.New = old, new + return diff + } + + if old == "" { + diff.Type = DiffTypeAdded + diff.New = new + } else if new == "" { + diff.Type = DiffTypeDeleted + diff.Old = old + } else { + diff.Type = DiffTypeEdited + diff.Old = old + diff.New = new + } + return diff +} + +func (f *FieldDiff) GoString() string { + out := fmt.Sprintf("%q (%s): %q => %q", f.Name, f.Type, f.Old, f.New) + if len(f.Annotations) != 0 { + out += fmt.Sprintf(" (%s)", strings.Join(f.Annotations, ", ")) + } + + return out +} + +func (f *FieldDiff) Less(other *FieldDiff) bool { + if reflect.DeepEqual(f, other) { + return false + } else if other == nil { + return false + } else if f == nil { + return true + } + + if f.Name != other.Name { + return f.Name < other.Name + } else if f.Old != other.Old { + return f.Old < other.Old + } + + return f.New < other.New +} + +// For sorting FieldDiffs +type FieldDiffs []*FieldDiff + +func (f FieldDiffs) Len() int { return len(f) } +func (f FieldDiffs) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f FieldDiffs) Less(i, j int) bool { return f[i].Less(f[j]) } + +// fieldDiffs takes a map of field names to their values and returns a set of +// field diffs. If contextual diff is enabled, even non-changed fields will be +// returned. 
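+//
+// For example (the flattened maps are illustrative):
+//
+//	old := map[string]string{"CPU": "500", "MemoryMB": "256"}
+//	new := map[string]string{"CPU": "750", "MemoryMB": "256"}
+//	diffs := fieldDiffs(old, new, false)
+//	// One diff: "CPU" (Edited): "500" => "750". With contextual set,
+//	// MemoryMB would also be returned as a DiffTypeNone field.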
+func fieldDiffs(old, new map[string]string, contextual bool) []*FieldDiff {
+	var diffs []*FieldDiff
+	visited := make(map[string]struct{})
+	for k, oldV := range old {
+		visited[k] = struct{}{}
+		newV := new[k]
+		if diff := fieldDiff(oldV, newV, k, contextual); diff != nil {
+			diffs = append(diffs, diff)
+		}
+	}
+
+	for k, newV := range new {
+		if _, ok := visited[k]; !ok {
+			if diff := fieldDiff("", newV, k, contextual); diff != nil {
+				diffs = append(diffs, diff)
+			}
+		}
+	}
+
+	sort.Sort(FieldDiffs(diffs))
+	return diffs
+}
+
+// stringSetDiff diffs two sets of strings with the given name.
+func stringSetDiff(old, new []string, name string, contextual bool) *ObjectDiff {
+	oldMap := make(map[string]struct{}, len(old))
+	newMap := make(map[string]struct{}, len(new))
+	for _, o := range old {
+		oldMap[o] = struct{}{}
+	}
+	for _, n := range new {
+		newMap[n] = struct{}{}
+	}
+	if reflect.DeepEqual(oldMap, newMap) && !contextual {
+		return nil
+	}
+
+	diff := &ObjectDiff{Name: name}
+	var added, removed bool
+	for k := range oldMap {
+		if _, ok := newMap[k]; !ok {
+			diff.Fields = append(diff.Fields, fieldDiff(k, "", name, contextual))
+			removed = true
+		} else if contextual {
+			diff.Fields = append(diff.Fields, fieldDiff(k, k, name, contextual))
+		}
+	}
+
+	for k := range newMap {
+		if _, ok := oldMap[k]; !ok {
+			diff.Fields = append(diff.Fields, fieldDiff("", k, name, contextual))
+			added = true
+		}
+	}
+
+	sort.Sort(FieldDiffs(diff.Fields))
+
+	// Determine the type
+	if added && removed {
+		diff.Type = DiffTypeEdited
+	} else if added {
+		diff.Type = DiffTypeAdded
+	} else if removed {
+		diff.Type = DiffTypeDeleted
+	} else {
+		// Diff of an empty set
+		if len(diff.Fields) == 0 {
+			return nil
+		}
+
+		diff.Type = DiffTypeNone
+	}
+
+	return diff
+}
+
+// primitiveObjectDiff returns a diff of the passed objects' primitive fields.
+// The filter field can be used to exclude fields from the diff. The name is the
+// name of the objects. If contextual is set, non-changed fields will also be
+// stored in the object diff.
+func primitiveObjectDiff(old, new interface{}, filter []string, name string, contextual bool) *ObjectDiff {
+	oldPrimitiveFlat := flatmap.Flatten(old, filter, true)
+	newPrimitiveFlat := flatmap.Flatten(new, filter, true)
+	delete(oldPrimitiveFlat, "")
+	delete(newPrimitiveFlat, "")
+
+	diff := &ObjectDiff{Name: name}
+	diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
+
+	var added, deleted, edited bool
+	for _, f := range diff.Fields {
+		switch f.Type {
+		case DiffTypeEdited:
+			edited = true
+		case DiffTypeDeleted:
+			deleted = true
+		case DiffTypeAdded:
+			added = true
+		}
+	}
+
+	// Note: && binds tighter than ||, so this reads as "edited, or both
+	// added and deleted".
+	if edited || added && deleted {
+		diff.Type = DiffTypeEdited
+	} else if added {
+		diff.Type = DiffTypeAdded
+	} else if deleted {
+		diff.Type = DiffTypeDeleted
+	} else {
+		return nil
+	}
+
+	return diff
+}
+
+// primitiveObjectSetDiff does a set difference of the old and new sets. The
+// filter parameter can be used to filter a set of primitive fields in the
+// passed structs. The name corresponds to the name of the passed objects. If
+// contextual diff is enabled, objects' primitive fields will be returned even if
+// no diff exists.
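+//
+// A rough sketch of how it is invoked for task artifacts earlier in this
+// file (the Artifact values themselves are illustrative):
+//
+//	diffs := primitiveObjectSetDiff(
+//		interfaceSlice(oldTask.Artifacts),
+//		interfaceSlice(newTask.Artifacts),
+//		nil, "Artifact", contextual)
+//	// Identical artifacts hash to the same key and drop out of the set
+//	// difference; each remaining artifact yields one Added or Deleted
+//	// ObjectDiff.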
+func primitiveObjectSetDiff(old, new []interface{}, filter []string, name string, contextual bool) []*ObjectDiff {
+	makeSet := func(objects []interface{}) map[string]interface{} {
+		objMap := make(map[string]interface{}, len(objects))
+		for _, obj := range objects {
+			hash, err := hashstructure.Hash(obj, nil)
+			if err != nil {
+				panic(err)
+			}
+			objMap[fmt.Sprintf("%d", hash)] = obj
+		}
+
+		return objMap
+	}
+
+	oldSet := makeSet(old)
+	newSet := makeSet(new)
+
+	var diffs []*ObjectDiff
+	for k, v := range oldSet {
+		// Deleted
+		if _, ok := newSet[k]; !ok {
+			diffs = append(diffs, primitiveObjectDiff(v, nil, filter, name, contextual))
+		}
+	}
+	for k, v := range newSet {
+		// Added
+		if _, ok := oldSet[k]; !ok {
+			diffs = append(diffs, primitiveObjectDiff(nil, v, filter, name, contextual))
+		}
+	}
+
+	sort.Sort(ObjectDiffs(diffs))
+	return diffs
+}
+
+// interfaceSlice is a helper method that takes a slice of typed elements and
+// returns a slice of interface. This method will panic if given a non-slice
+// input.
+func interfaceSlice(slice interface{}) []interface{} {
+	s := reflect.ValueOf(slice)
+	if s.Kind() != reflect.Slice {
+		panic("interfaceSlice() given a non-slice type")
+	}
+
+	ret := make([]interface{}, s.Len())
+
+	for i := 0; i < s.Len(); i++ {
+		ret[i] = s.Index(i).Interface()
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go
new file mode 100644
index 000000000..751befd65
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go
@@ -0,0 +1,310 @@
+package structs
+
+import (
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"math"
+	"sort"
+	"strings"
+
+	"golang.org/x/crypto/blake2b"
+
+	multierror "github.com/hashicorp/go-multierror"
+	lru "github.com/hashicorp/golang-lru"
+	"github.com/hashicorp/nomad/acl"
+)
+
+// MergeMultierrorWarnings takes job warnings and canonicalization warnings
+// and merges them into a returnable string. Any of the passed errors may be
+// nil.
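+//
+// For example (the warning text is illustrative):
+//
+//	msg := MergeMultierrorWarnings(jobWarnings, nil)
+//	// With one non-nil warning, msg reads:
+//	// "1 warning(s):\n\n* <warning text>"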
+func MergeMultierrorWarnings(warnings ...error) string {
+	var warningMsg multierror.Error
+	for _, warn := range warnings {
+		if warn != nil {
+			multierror.Append(&warningMsg, warn)
+		}
+	}
+
+	if len(warningMsg.Errors) == 0 {
+		return ""
+	}
+
+	// Set the formatter
+	warningMsg.ErrorFormat = warningsFormatter
+	return warningMsg.Error()
+}
+
+// warningsFormatter is used to format job warnings
+func warningsFormatter(es []error) string {
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d warning(s):\n\n%s",
+		len(es), strings.Join(points, "\n"))
+}
+
+// RemoveAllocs is used to remove any allocs with the given IDs
+// from the list of allocations
+func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
+	// Convert remove into a set
+	removeSet := make(map[string]struct{})
+	for _, remove := range remove {
+		removeSet[remove.ID] = struct{}{}
+	}
+
+	n := len(alloc)
+	for i := 0; i < n; i++ {
+		if _, ok := removeSet[alloc[i].ID]; ok {
+			alloc[i], alloc[n-1] = alloc[n-1], nil
+			i--
+			n--
+		}
+	}
+
+	alloc = alloc[:n]
+	return alloc
+}
+
+// FilterTerminalAllocs filters out all allocations in a terminal state and
+// returns the latest terminal allocations
+func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
+	terminalAllocsByName := make(map[string]*Allocation)
+	n := len(allocs)
+	for i := 0; i < n; i++ {
+		if allocs[i].TerminalStatus() {
+
+			// Add the allocation to the terminal allocs map if it's not already
+			// added or has a higher create index than the one which is
+			// currently present.
+			alloc, ok := terminalAllocsByName[allocs[i].Name]
+			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
+				terminalAllocsByName[allocs[i].Name] = allocs[i]
+			}
+
+			// Remove the allocation
+			allocs[i], allocs[n-1] = allocs[n-1], nil
+			i--
+			n--
+		}
+	}
+	return allocs[:n], terminalAllocsByName
+}
+
+// AllocsFit checks if a given set of allocations will fit on a node.
+// The netIdx can optionally be provided if it has already been computed.
+// If the netIdx is provided, it is assumed that the client has already
+// ensured there are no collisions.
+func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
+	// Compute the utilization from zero
+	used := new(Resources)
+
+	// Add the reserved resources of the node
+	if node.Reserved != nil {
+		if err := used.Add(node.Reserved); err != nil {
+			return false, "", nil, err
+		}
+	}
+
+	// For each alloc, add the resources
+	for _, alloc := range allocs {
+		if alloc.Resources != nil {
+			if err := used.Add(alloc.Resources); err != nil {
+				return false, "", nil, err
+			}
+		} else if alloc.TaskResources != nil {
+
+			// Add the allocation's shared resource requests to the used
+			// resources
+			if err := used.Add(alloc.SharedResources); err != nil {
+				return false, "", nil, err
+			}
+			// Allocations within the plan have the combined resources stripped
+			// to save space, so sum up the individual task resources.
+ for _, taskResource := range alloc.TaskResources { + if err := used.Add(taskResource); err != nil { + return false, "", nil, err + } + } + } else { + return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID) + } + } + + // Check that the node resources are a super set of those + // that are being allocated + if superset, dimension := node.Resources.Superset(used); !superset { + return false, dimension, used, nil + } + + // Create the network index if missing + if netIdx == nil { + netIdx = NewNetworkIndex() + defer netIdx.Release() + if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) { + return false, "reserved port collision", used, nil + } + } + + // Check if the network is overcommitted + if netIdx.Overcommitted() { + return false, "bandwidth exceeded", used, nil + } + + // Allocations fit! + return true, "", used, nil +} + +// ScoreFit is used to score the fit based on the Google work published here: +// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt +// This is equivalent to their BestFit v3 +func ScoreFit(node *Node, util *Resources) float64 { + // Determine the node availability + nodeCpu := float64(node.Resources.CPU) + if node.Reserved != nil { + nodeCpu -= float64(node.Reserved.CPU) + } + nodeMem := float64(node.Resources.MemoryMB) + if node.Reserved != nil { + nodeMem -= float64(node.Reserved.MemoryMB) + } + + // Compute the free percentage + freePctCpu := 1 - (float64(util.CPU) / nodeCpu) + freePctRam := 1 - (float64(util.MemoryMB) / nodeMem) + + // Total will be "maximized" the smaller the value is. + // At 100% utilization, the total is 2, while at 0% util it is 20. + total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam) + + // Invert so that the "maximized" total represents a high-value + // score. Because the floor is 20, we simply use that as an anchor. + // This means at a perfect fit, we return 18 as the score. + score := 20.0 - total + + // Bound the score, just in case + // If the score is over 18, that means we've overfit the node. + if score > 18.0 { + score = 18.0 + } else if score < 0 { + score = 0 + } + return score +} + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +func CopySliceConstraints(s []*Constraint) []*Constraint { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*Constraint, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +// VaultPoliciesSet takes the structure returned by VaultPolicies and returns +// the set of required policies +func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { + set := make(map[string]struct{}) + + for _, tgp := range policies { + for _, tp := range tgp { + for _, p := range tp.Policies { + set[p] = struct{}{} + } + } + } + + flattened := make([]string, 0, len(set)) + for p := range set { + flattened = append(flattened, p) + } + return flattened +} + +// DenormalizeAllocationJobs is used to attach a job to all allocations that are +// non-terminal and do not have a job already. This is useful in cases where the +// job is normalized. 
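+//
+// A sketch of the intended call, given an AllocUpdateRequest req (defined in
+// this package's structs.go), where the shared job has been pulled out of
+// each allocation:
+//
+//	DenormalizeAllocationJobs(req.Job, req.Alloc)
+//	// Every non-terminal allocation with a nil Job now points at req.Job.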
+func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) { + if job != nil { + for _, alloc := range allocs { + if alloc.Job == nil && !alloc.TerminalStatus() { + alloc.Job = job + } + } + } +} + +// AllocName returns the name of the allocation given the input. +func AllocName(job, group string, idx uint) string { + return fmt.Sprintf("%s.%s[%d]", job, group, idx) +} + +// ACLPolicyListHash returns a consistent hash for a set of policies. +func ACLPolicyListHash(policies []*ACLPolicy) string { + cacheKeyHash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + for _, policy := range policies { + cacheKeyHash.Write([]byte(policy.Name)) + binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) + } + cacheKey := string(cacheKeyHash.Sum(nil)) + return cacheKey +} + +// CompileACLObject compiles a set of ACL policies into an ACL object with a cache +func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) { + // Sort the policies to ensure consistent ordering + sort.Slice(policies, func(i, j int) bool { + return policies[i].Name < policies[j].Name + }) + + // Determine the cache key + cacheKey := ACLPolicyListHash(policies) + aclRaw, ok := cache.Get(cacheKey) + if ok { + return aclRaw.(*acl.ACL), nil + } + + // Parse the policies + parsed := make([]*acl.Policy, 0, len(policies)) + for _, policy := range policies { + p, err := acl.Parse(policy.Rules) + if err != nil { + return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err) + } + parsed = append(parsed, p) + } + + // Create the ACL object + aclObj, err := acl.NewACL(false, parsed) + if err != nil { + return nil, fmt.Errorf("failed to construct ACL: %v", err) + } + + // Update the cache + cache.Add(cacheKey, aclObj) + return aclObj, nil +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go new file mode 100644 index 000000000..3f0ebff4f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go @@ -0,0 +1,326 @@ +package structs + +import ( + "fmt" + "math/rand" + "net" + "sync" +) + +const ( + // MinDynamicPort is the smallest dynamic port generated + MinDynamicPort = 20000 + + // MaxDynamicPort is the largest dynamic port generated + MaxDynamicPort = 32000 + + // maxRandPortAttempts is the maximum number of attempt + // to assign a random port + maxRandPortAttempts = 20 + + // maxValidPort is the max valid port number + maxValidPort = 65536 +) + +var ( + // bitmapPool is used to pool the bitmaps used for port collision + // checking. They are fairly large (8K) so we can re-use them to + // avoid GC pressure. Care should be taken to call Clear() on any + // bitmap coming from the pool. 
+ bitmapPool = new(sync.Pool) +) + +// NetworkIndex is used to index the available network resources +// and the used network resources on a machine given allocations +type NetworkIndex struct { + AvailNetworks []*NetworkResource // List of available networks + AvailBandwidth map[string]int // Bandwidth by device + UsedPorts map[string]Bitmap // Ports by IP + UsedBandwidth map[string]int // Bandwidth by device +} + +// NewNetworkIndex is used to construct a new network index +func NewNetworkIndex() *NetworkIndex { + return &NetworkIndex{ + AvailBandwidth: make(map[string]int), + UsedPorts: make(map[string]Bitmap), + UsedBandwidth: make(map[string]int), + } +} + +// Release is called when the network index is no longer needed +// to attempt to re-use some of the memory it has allocated +func (idx *NetworkIndex) Release() { + for _, b := range idx.UsedPorts { + bitmapPool.Put(b) + } +} + +// Overcommitted checks if the network is overcommitted +func (idx *NetworkIndex) Overcommitted() bool { + for device, used := range idx.UsedBandwidth { + avail := idx.AvailBandwidth[device] + if used > avail { + return true + } + } + return false +} + +// SetNode is used to setup the available network resources. Returns +// true if there is a collision +func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { + // Add the available CIDR blocks + for _, n := range node.Resources.Networks { + if n.Device != "" { + idx.AvailNetworks = append(idx.AvailNetworks, n) + idx.AvailBandwidth[n.Device] = n.MBits + } + } + + // Add the reserved resources + if r := node.Reserved; r != nil { + for _, n := range r.Networks { + if idx.AddReserved(n) { + collide = true + } + } + } + return +} + +// AddAllocs is used to add the used network resources. Returns +// true if there is a collision +func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) { + for _, alloc := range allocs { + for _, task := range alloc.TaskResources { + if len(task.Networks) == 0 { + continue + } + n := task.Networks[0] + if idx.AddReserved(n) { + collide = true + } + } + } + return +} + +// AddReserved is used to add a reserved network usage, returns true +// if there is a port collision +func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) { + // Add the port usage + used := idx.UsedPorts[n.IP] + if used == nil { + // Try to get a bitmap from the pool, else create + raw := bitmapPool.Get() + if raw != nil { + used = raw.(Bitmap) + used.Clear() + } else { + used, _ = NewBitmap(maxValidPort) + } + idx.UsedPorts[n.IP] = used + } + + for _, ports := range [][]Port{n.ReservedPorts, n.DynamicPorts} { + for _, port := range ports { + // Guard against invalid port + if port.Value < 0 || port.Value >= maxValidPort { + return true + } + if used.Check(uint(port.Value)) { + collide = true + } else { + used.Set(uint(port.Value)) + } + } + } + + // Add the bandwidth + idx.UsedBandwidth[n.Device] += n.MBits + return +} + +// yieldIP is used to iteratively invoke the callback with +// an available IP +func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) { + inc := func(ip net.IP) { + for j := len(ip) - 1; j >= 0; j-- { + ip[j]++ + if ip[j] > 0 { + break + } + } + } + + for _, n := range idx.AvailNetworks { + ip, ipnet, err := net.ParseCIDR(n.CIDR) + if err != nil { + continue + } + for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { + if cb(n, ip) { + return + } + } + } +} + +// AssignNetwork is used to assign network resources given an ask. 
+// If the ask cannot be satisfied, returns nil
+func (idx *NetworkIndex) AssignNetwork(ask *NetworkResource) (out *NetworkResource, err error) {
+	err = fmt.Errorf("no networks available")
+	idx.yieldIP(func(n *NetworkResource, ip net.IP) (stop bool) {
+		// Convert the IP to a string
+		ipStr := ip.String()
+
+		// Check if we would exceed the bandwidth cap
+		availBandwidth := idx.AvailBandwidth[n.Device]
+		usedBandwidth := idx.UsedBandwidth[n.Device]
+		if usedBandwidth+ask.MBits > availBandwidth {
+			err = fmt.Errorf("bandwidth exceeded")
+			return
+		}
+
+		used := idx.UsedPorts[ipStr]
+
+		// Check if any of the reserved ports are in use
+		for _, port := range ask.ReservedPorts {
+			// Guard against invalid port
+			if port.Value < 0 || port.Value >= maxValidPort {
+				err = fmt.Errorf("invalid port %d (out of range)", port.Value)
+				return
+			}
+
+			// Check if in use
+			if used != nil && used.Check(uint(port.Value)) {
+				err = fmt.Errorf("reserved port collision")
+				return
+			}
+		}
+
+		// Create the offer
+		offer := &NetworkResource{
+			Device:        n.Device,
+			IP:            ipStr,
+			MBits:         ask.MBits,
+			ReservedPorts: ask.ReservedPorts,
+			DynamicPorts:  ask.DynamicPorts,
+		}
+
+		// Try to stochastically pick the dynamic ports as it is faster and
+		// lower memory usage.
+		var dynPorts []int
+		var dynErr error
+		dynPorts, dynErr = getDynamicPortsStochastic(used, ask)
+		if dynErr == nil {
+			goto BUILD_OFFER
+		}
+
+		// Fall back to the precise method if the random sampling failed.
+		dynPorts, dynErr = getDynamicPortsPrecise(used, ask)
+		if dynErr != nil {
+			err = dynErr
+			return
+		}
+
+	BUILD_OFFER:
+		for i, port := range dynPorts {
+			offer.DynamicPorts[i].Value = port
+		}
+
+		// Stop, we have an offer!
+		out = offer
+		err = nil
+		return true
+	})
+	return
+}
+
+// getDynamicPortsPrecise takes the node's used port bitmap, which may be nil
+// if no ports have been allocated yet, and the network ask, and returns a set
+// of unused ports to fulfill the ask's DynamicPorts, or an error if it failed.
+// An error means the ask cannot be satisfied, as the method does a precise
+// search.
+func getDynamicPortsPrecise(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) {
+	// Create a copy of the used ports and apply the new reserves
+	var usedSet Bitmap
+	var err error
+	if nodeUsed != nil {
+		usedSet, err = nodeUsed.Copy()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		usedSet, err = NewBitmap(maxValidPort)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for _, port := range ask.ReservedPorts {
+		usedSet.Set(uint(port.Value))
+	}
+
+	// Get the indexes of the unset
+	availablePorts := usedSet.IndexesInRange(false, MinDynamicPort, MaxDynamicPort)
+
+	// Check that we have enough available ports, then randomly shuffle in
+	// the number we need
+	numDyn := len(ask.DynamicPorts)
+	if len(availablePorts) < numDyn {
+		return nil, fmt.Errorf("dynamic port selection failed")
+	}
+
+	numAvailable := len(availablePorts)
+	for i := 0; i < numDyn; i++ {
+		j := rand.Intn(numAvailable)
+		availablePorts[i], availablePorts[j] = availablePorts[j], availablePorts[i]
+	}
+
+	return availablePorts[:numDyn], nil
+}
+
+// getDynamicPortsStochastic takes the node's used port bitmap, which may be
+// nil if no ports have been allocated yet, and the network ask, and returns a
+// set of unused ports to fulfill the ask's DynamicPorts, or an error if it
+// failed. An error does not mean the ask cannot be satisfied, as the method
+// has a fixed number of random probes and, if these fail, the search is
+// aborted.
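+//
+// A sketch of the intended calling pattern (mirroring AssignNetwork above):
+//
+//	ports, err := getDynamicPortsStochastic(used, ask)
+//	if err != nil {
+//		// Port space too dense for random probing; fall back to the
+//		// precise bitmap scan.
+//		ports, err = getDynamicPortsPrecise(used, ask)
+//	}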
+func getDynamicPortsStochastic(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) {
+	var reserved, dynamic []int
+	for _, port := range ask.ReservedPorts {
+		reserved = append(reserved, port.Value)
+	}
+
+	for i := 0; i < len(ask.DynamicPorts); i++ {
+		attempts := 0
+	PICK:
+		attempts++
+		if attempts > maxRandPortAttempts {
+			return nil, fmt.Errorf("stochastic dynamic port selection failed")
+		}
+
+		randPort := MinDynamicPort + rand.Intn(MaxDynamicPort-MinDynamicPort)
+		if nodeUsed != nil && nodeUsed.Check(uint(randPort)) {
+			goto PICK
+		}
+
+		for _, ports := range [][]int{reserved, dynamic} {
+			if isPortReserved(ports, randPort) {
+				goto PICK
+			}
+		}
+		dynamic = append(dynamic, randPort)
+	}
+
+	return dynamic, nil
+}
+
+// isPortReserved scans an integer slice for a port value
+func isPortReserved(haystack []int, needle int) bool {
+	for _, item := range haystack {
+		if item == needle {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go
new file mode 100644
index 000000000..aab070055
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go
@@ -0,0 +1,94 @@
+package structs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/hashstructure"
+)
+
+const (
+	// NodeUniqueNamespace is a prefix that can be appended to node meta or
+	// attribute keys to mark them for exclusion in computed node class.
+	NodeUniqueNamespace = "unique."
+)
+
+// UniqueNamespace takes a key and returns the key marked under the unique
+// namespace.
+func UniqueNamespace(key string) string {
+	return fmt.Sprintf("%s%s", NodeUniqueNamespace, key)
+}
+
+// IsUniqueNamespace returns whether the key is under the unique namespace.
+func IsUniqueNamespace(key string) bool {
+	return strings.HasPrefix(key, NodeUniqueNamespace)
+}
+
+// ComputeClass computes a derived class for the node based on its attributes.
+// ComputedClass is a unique id that identifies nodes with a common set of
+// attributes and capabilities. Thus, when calculating a node's computed class
+// we avoid including any uniquely identifying fields.
+func (n *Node) ComputeClass() error {
+	hash, err := hashstructure.Hash(n, nil)
+	if err != nil {
+		return err
+	}
+
+	n.ComputedClass = fmt.Sprintf("v1:%d", hash)
+	return nil
+}
+
+// HashInclude is used to blacklist uniquely identifying node fields from being
+// included in the computed node class.
+func (n Node) HashInclude(field string, v interface{}) (bool, error) {
+	switch field {
+	case "Datacenter", "Attributes", "Meta", "NodeClass":
+		return true, nil
+	default:
+		return false, nil
+	}
+}
+
+// HashIncludeMap is used to blacklist uniquely identifying node map keys from being
+// included in the computed node class.
+func (n Node) HashIncludeMap(field string, k, v interface{}) (bool, error) {
+	key, ok := k.(string)
+	if !ok {
+		return false, fmt.Errorf("map key %v not a string", k)
+	}
+
+	switch field {
+	case "Meta", "Attributes":
+		return !IsUniqueNamespace(key), nil
+	default:
+		return false, fmt.Errorf("unexpected map field: %v", field)
+	}
+}
+
+// EscapedConstraints takes a set of constraints and returns the set that
+// escapes computed node classes.
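+//
+// For example (constraint fields assumed from this package's Constraint
+// struct; values are illustrative):
+//
+//	cs := []*Constraint{
+//		{LTarget: "${attr.unique.network.ip-address}", RTarget: "10.0.0.5", Operand: "="},
+//		{LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "="},
+//	}
+//	// EscapedConstraints(cs) returns only the first constraint, since
+//	// targets under the unique namespace escape the computed-class
+//	// optimization.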
+func EscapedConstraints(constraints []*Constraint) []*Constraint {
+	var escaped []*Constraint
+	for _, c := range constraints {
+		if constraintTargetEscapes(c.LTarget) || constraintTargetEscapes(c.RTarget) {
+			escaped = append(escaped, c)
+		}
+	}
+
+	return escaped
+}
+
+// constraintTargetEscapes returns whether the target of a constraint escapes
+// computed node class optimization.
+func constraintTargetEscapes(target string) bool {
+	switch {
+	case strings.HasPrefix(target, "${node.unique."):
+		return true
+	case strings.HasPrefix(target, "${attr.unique."):
+		return true
+	case strings.HasPrefix(target, "${meta.unique."):
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go
new file mode 100644
index 000000000..93b99f6fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go
@@ -0,0 +1,49 @@
+package structs
+
+import (
+	"github.com/hashicorp/raft"
+)
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Nomad.
+	ID raft.ServerID
+
+	// Node is the node name of the server, as known by Nomad; if unknown,
+	// this will be set to "(unknown)".
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address raft.ServerAddress
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Nomad.
+	Voter bool
+}
+
+// RaftConfigurationResponse is returned when querying for the current Raft
+// configuration.
+type RaftConfigurationResponse struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft
+// operation on a specific Raft peer by address in the form of "IP:port".
+type RaftPeerByAddressRequest struct {
+	// Address is the peer to remove, in the form "IP:port".
+	Address raft.ServerAddress
+
+	// WriteRequest holds the Region for this request.
+ WriteRequest +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go new file mode 100644 index 000000000..1ca19e35d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go @@ -0,0 +1,5783 @@ +package structs + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "golang.org/x/crypto/blake2b" + + "github.com/gorhill/cronexpr" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/args" + "github.com/mitchellh/copystructure" + "github.com/ugorji/go/codec" + + hcodec "github.com/hashicorp/go-msgpack/codec" +) + +var ( + ErrNoLeader = fmt.Errorf("No cluster leader") + ErrNoRegionPath = fmt.Errorf("No path to region") + ErrTokenNotFound = errors.New("ACL token not found") + ErrPermissionDenied = errors.New("Permission denied") + + // validPolicyName is used to validate a policy name + validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$") +) + +type MessageType uint8 + +const ( + NodeRegisterRequestType MessageType = iota + NodeDeregisterRequestType + NodeUpdateStatusRequestType + NodeUpdateDrainRequestType + JobRegisterRequestType + JobDeregisterRequestType + EvalUpdateRequestType + EvalDeleteRequestType + AllocUpdateRequestType + AllocClientUpdateRequestType + ReconcileJobSummariesRequestType + VaultAccessorRegisterRequestType + VaultAccessorDegisterRequestType + ApplyPlanResultsRequestType + DeploymentStatusUpdateRequestType + DeploymentPromoteRequestType + DeploymentAllocHealthRequestType + DeploymentDeleteRequestType + JobStabilityRequestType + ACLPolicyUpsertRequestType + ACLPolicyDeleteRequestType + ACLTokenUpsertRequestType + ACLTokenDeleteRequestType + ACLTokenBootstrapRequestType +) + +const ( + // IgnoreUnknownTypeFlag is set along with a MessageType + // to indicate that the message type can be safely ignored + // if it is not recognized. This is for future proofing, so + // that new commands can be added in a way that won't cause + // old servers to crash when the FSM attempts to process them. + IgnoreUnknownTypeFlag MessageType = 128 + + // ApiMajorVersion is returned as part of the Status.Version request. + // It should be incremented anytime the APIs are changed in a way + // that would break clients for sane client versioning. + ApiMajorVersion = 1 + + // ApiMinorVersion is returned as part of the Status.Version request. + // It should be incremented anytime the APIs are changed to allow + // for sane client versioning. Minor changes should be compatible + // within the major version. + ApiMinorVersion = 1 + + ProtocolVersion = "protocol" + APIMajorVersion = "api.major" + APIMinorVersion = "api.minor" + + GetterModeAny = "any" + GetterModeFile = "file" + GetterModeDir = "dir" + + // maxPolicyDescriptionLength limits a policy description length + maxPolicyDescriptionLength = 256 + + // maxTokenNameLength limits a ACL token name length + maxTokenNameLength = 64 + + // ACLClientToken and ACLManagementToken are the only types of tokens + ACLClientToken = "client" + ACLManagementToken = "management" + + // DefaultNamespace is the default namespace. 
+ DefaultNamespace = "default" + DefaultNamespaceDescription = "Default shared namespace" +) + +// Context defines the scope in which a search for Nomad object operates, and +// is also used to query the matching index value for this context +type Context string + +const ( + Allocs Context = "allocs" + Deployments Context = "deployment" + Evals Context = "evals" + Jobs Context = "jobs" + Nodes Context = "nodes" + Namespaces Context = "namespaces" + All Context = "all" +) + +// NamespacedID is a tuple of an ID and a namespace +type NamespacedID struct { + ID string + Namespace string +} + +// RPCInfo is used to describe common information about query +type RPCInfo interface { + RequestRegion() string + IsRead() bool + AllowStaleRead() bool +} + +// QueryOptions is used to specify various flags for read queries +type QueryOptions struct { + // The target region for this query + Region string + + // Namespace is the target namespace for the query. + Namespace string + + // If set, wait until query exceeds given index. Must be provided + // with MaxQueryTime. + MinQueryIndex uint64 + + // Provided with MinQueryIndex to wait for change. + MaxQueryTime time.Duration + + // If set, any follower can service the request. Results + // may be arbitrarily stale. + AllowStale bool + + // If set, used as prefix for resource list searches + Prefix string + + // SecretID is secret portion of the ACL token used for the request + SecretID string +} + +func (q QueryOptions) RequestRegion() string { + return q.Region +} + +func (q QueryOptions) RequestNamespace() string { + if q.Namespace == "" { + return DefaultNamespace + } + return q.Namespace +} + +// QueryOption only applies to reads, so always true +func (q QueryOptions) IsRead() bool { + return true +} + +func (q QueryOptions) AllowStaleRead() bool { + return q.AllowStale +} + +type WriteRequest struct { + // The target region for this write + Region string + + // Namespace is the target namespace for the write. + Namespace string + + // SecretID is secret portion of the ACL token used for the request + SecretID string +} + +func (w WriteRequest) RequestRegion() string { + // The target region for this request + return w.Region +} + +func (w WriteRequest) RequestNamespace() string { + if w.Namespace == "" { + return DefaultNamespace + } + return w.Namespace +} + +// WriteRequest only applies to writes, always false +func (w WriteRequest) IsRead() bool { + return false +} + +func (w WriteRequest) AllowStaleRead() bool { + return false +} + +// QueryMeta allows a query response to include potentially +// useful metadata about a query +type QueryMeta struct { + // This is the index associated with the read + Index uint64 + + // If AllowStale is used, this is time elapsed since + // last contact between the follower and leader. This + // can be used to gauge staleness. + LastContact time.Duration + + // Used to indicate if there is a known leader node + KnownLeader bool +} + +// WriteMeta allows a write response to include potentially +// useful metadata about the write +type WriteMeta struct { + // This is the index associated with the write + Index uint64 +} + +// NodeRegisterRequest is used for Node.Register endpoint +// to register a node as being a schedulable entity. +type NodeRegisterRequest struct { + Node *Node + WriteRequest +} + +// NodeDeregisterRequest is used for Node.Deregister endpoint +// to deregister a node as being a schedulable entity. 
+type NodeDeregisterRequest struct {
+	NodeID string
+	WriteRequest
+}
+
+// NodeServerInfo is used in NodeUpdateResponse to return Nomad server
+// information used in RPC server lists.
+type NodeServerInfo struct {
+	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
+	// be contacted at for RPCs.
+	RPCAdvertiseAddr string
+
+	// RpcMajorVersion is the major version number the Nomad Server
+	// supports
+	RPCMajorVersion int32
+
+	// RpcMinorVersion is the minor version number the Nomad Server
+	// supports
+	RPCMinorVersion int32
+
+	// Datacenter is the datacenter that a Nomad server belongs to
+	Datacenter string
+}
+
+// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
+// to update the status of a node.
+type NodeUpdateStatusRequest struct {
+	NodeID string
+	Status string
+	WriteRequest
+}
+
+// NodeUpdateDrainRequest is used for updating the drain status
+type NodeUpdateDrainRequest struct {
+	NodeID string
+	Drain  bool
+	WriteRequest
+}
+
+// NodeEvaluateRequest is used to re-evaluate the node
+type NodeEvaluateRequest struct {
+	NodeID string
+	WriteRequest
+}
+
+// NodeSpecificRequest is used when we just need to specify a target node
+type NodeSpecificRequest struct {
+	NodeID   string
+	SecretID string
+	QueryOptions
+}
+
+// SearchResponse is used to return matches and information about whether
+// the match list is truncated specific to each type of context.
+type SearchResponse struct {
+	// Map of context types to ids which match a specified prefix
+	Matches map[Context][]string
+
+	// Truncations indicates whether the matches for a particular context have
+	// been truncated
+	Truncations map[Context]bool
+
+	QueryMeta
+}
+
+// SearchRequest is used to parameterize a request, and returns a
+// list of matches made up of jobs, allocations, evaluations, and/or nodes,
+// along with whether or not the information returned is truncated.
+type SearchRequest struct {
+	// Prefix is what ids are matched to. I.e., if the given prefix were
+	// "a", potential matches might be "abcd" or "aabb"
+	Prefix string
+
+	// Context is the type that can be matched against. A context can be a job,
+	// node, evaluation, allocation, or empty (indicating every context should
+	// be matched)
+	Context Context
+
+	QueryOptions
+}
+
+// JobRegisterRequest is used for Job.Register endpoint
+// to register a job as being a schedulable entity.
+type JobRegisterRequest struct {
+	Job *Job
+
+	// If EnforceIndex is set then the job will only be registered if the passed
+	// JobModifyIndex matches the current Jobs index. If the index is zero, the
+	// register only occurs if the job is new.
+	EnforceIndex   bool
+	JobModifyIndex uint64
+
+	// PolicyOverride is set when the user is attempting to override any policies
+	PolicyOverride bool
+
+	WriteRequest
+}
+
+// JobDeregisterRequest is used for Job.Deregister endpoint
+// to deregister a job as being a schedulable entity.
+type JobDeregisterRequest struct { + JobID string + + // Purge controls whether the deregister purges the job from the system or + // whether the job is just marked as stopped and will be removed by the + // garbage collector + Purge bool + + WriteRequest +} + +// JobEvaluateRequest is used when we just need to re-evaluate a target job +type JobEvaluateRequest struct { + JobID string + WriteRequest +} + +// JobSpecificRequest is used when we just need to specify a target job +type JobSpecificRequest struct { + JobID string + AllAllocs bool + QueryOptions +} + +// JobListRequest is used to parameterize a list request +type JobListRequest struct { + QueryOptions +} + +// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run +// evaluation of the Job. +type JobPlanRequest struct { + Job *Job + Diff bool // Toggles an annotated diff + // PolicyOverride is set when the user is attempting to override any policies + PolicyOverride bool + WriteRequest +} + +// JobSummaryRequest is used when we just need to get a specific job summary +type JobSummaryRequest struct { + JobID string + QueryOptions +} + +// JobDispatchRequest is used to dispatch a job based on a parameterized job +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string + WriteRequest +} + +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobRevertRequest is used to revert a job to a prior version. +type JobRevertRequest struct { + // JobID is the ID of the job being reverted + JobID string + + // JobVersion the version to revert to. + JobVersion uint64 + + // EnforcePriorVersion if set will enforce that the job is at the given + // version before reverting. + EnforcePriorVersion *uint64 + + WriteRequest +} + +// JobStabilityRequest is used to marked a job as stable. +type JobStabilityRequest struct { + // Job to set the stability on + JobID string + JobVersion uint64 + + // Set the stability + Stable bool + WriteRequest +} + +// JobStabilityResponse is the response when marking a job as stable. +type JobStabilityResponse struct { + WriteMeta +} + +// NodeListRequest is used to parameterize a list request +type NodeListRequest struct { + QueryOptions +} + +// EvalUpdateRequest is used for upserting evaluations. +type EvalUpdateRequest struct { + Evals []*Evaluation + EvalToken string + WriteRequest +} + +// EvalDeleteRequest is used for deleting an evaluation. +type EvalDeleteRequest struct { + Evals []string + Allocs []string + WriteRequest +} + +// EvalSpecificRequest is used when we just need to specify a target evaluation +type EvalSpecificRequest struct { + EvalID string + QueryOptions +} + +// EvalAckRequest is used to Ack/Nack a specific evaluation +type EvalAckRequest struct { + EvalID string + Token string + WriteRequest +} + +// EvalDequeueRequest is used when we want to dequeue an evaluation +type EvalDequeueRequest struct { + Schedulers []string + Timeout time.Duration + SchedulerVersion uint16 + WriteRequest +} + +// EvalListRequest is used to list the evaluations +type EvalListRequest struct { + QueryOptions +} + +// PlanRequest is used to submit an allocation plan to the leader +type PlanRequest struct { + Plan *Plan + WriteRequest +} + +// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction +// committing the result of a plan. +type ApplyPlanResultsRequest struct { + // AllocUpdateRequest holds the allocation updates to be made by the + // scheduler. 
+	AllocUpdateRequest
+
+	// Deployment is the deployment created or updated as a result of a
+	// scheduling event.
+	Deployment *Deployment
+
+	// DeploymentUpdates is a set of status updates to apply to the given
+	// deployments. This allows the scheduler to cancel any unneeded deployment
+	// because the job is stopped or the update block is removed.
+	DeploymentUpdates []*DeploymentStatusUpdate
+}
+
+// AllocUpdateRequest is used to submit changes to allocations, either
+// to cause evictions or to assign new allocations. Both can be done
+// within a single transaction
+type AllocUpdateRequest struct {
+	// Alloc is the list of new allocations to assign
+	Alloc []*Allocation
+
+	// Job is the shared parent job of the allocations.
+	// It is pulled out since it is common to reduce payload size.
+	Job *Job
+
+	WriteRequest
+}
+
+// AllocListRequest is used to request a list of allocations
+type AllocListRequest struct {
+	QueryOptions
+}
+
+// AllocSpecificRequest is used to query a specific allocation
+type AllocSpecificRequest struct {
+	AllocID string
+	QueryOptions
+}
+
+// AllocsGetRequest is used to query a set of allocations
+type AllocsGetRequest struct {
+	AllocIDs []string
+	QueryOptions
+}
+
+// PeriodicForceRequest is used to force a specific periodic job.
+type PeriodicForceRequest struct {
+	JobID string
+	WriteRequest
+}
+
+// ServerMembersResponse has the list of servers in a cluster
+type ServerMembersResponse struct {
+	ServerName   string
+	ServerRegion string
+	ServerDC     string
+	Members      []*ServerMember
+}
+
+// ServerMember holds information about a Nomad server agent in a cluster
+type ServerMember struct {
+	Name        string
+	Addr        net.IP
+	Port        uint16
+	Tags        map[string]string
+	Status      string
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
+// following tasks in the given allocation
+type DeriveVaultTokenRequest struct {
+	NodeID   string
+	SecretID string
+	AllocID  string
+	Tasks    []string
+	QueryOptions
+}
+
+// VaultAccessorsRequest is used to operate on a set of Vault accessors
+type VaultAccessorsRequest struct {
+	Accessors []*VaultAccessor
+}
+
+// VaultAccessor is a reference to a created Vault token on behalf of
+// an allocation's task.
+type VaultAccessor struct {
+	AllocID     string
+	Task        string
+	NodeID      string
+	Accessor    string
+	CreationTTL int
+
+	// Raft Indexes
+	CreateIndex uint64
+}
+
+// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
+type DeriveVaultTokenResponse struct {
+	// Tasks is a mapping between the task name and the wrapped token
+	Tasks map[string]string
+
+	// Error stores any error that occurred. Errors are stored here so we can
+	// communicate whether it is retriable
+	Error *RecoverableError
+
+	QueryMeta
+}
+
+// GenericRequest is used to request where no
+// specific information is needed.
+type GenericRequest struct {
+	QueryOptions
+}
+
+// DeploymentListRequest is used to list the deployments
+type DeploymentListRequest struct {
+	QueryOptions
+}
+
+// DeploymentDeleteRequest is used for deleting deployments.
+type DeploymentDeleteRequest struct {
+	Deployments []string
+	WriteRequest
+}
+
+// DeploymentStatusUpdateRequest is used to update the status of a deployment as
+// well as optionally creating an evaluation atomically.
+type DeploymentStatusUpdateRequest struct {
+	// Eval, if set, is used to create an evaluation at the same time as
+	// updating the status of a deployment.
+	Eval *Evaluation
+
+	// DeploymentUpdate is a status update to apply to the given
+	// deployment.
+	DeploymentUpdate *DeploymentStatusUpdate
+
+	// Job is used to optionally upsert a job. This is used when setting the
+	// allocation health results in a deployment failure and the deployment
+	// auto-reverts to the latest stable job.
+	Job *Job
+}
+
+// DeploymentAllocHealthRequest is used to set the health of a set of
+// allocations as part of a deployment.
+type DeploymentAllocHealthRequest struct {
+	DeploymentID string
+
+	// Marks these allocations as healthy, allowing further allocations
+	// to be rolled.
+	HealthyAllocationIDs []string
+
+	// Any unhealthy allocations fail the deployment
+	UnhealthyAllocationIDs []string
+
+	WriteRequest
+}
+
+// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
+type ApplyDeploymentAllocHealthRequest struct {
+	DeploymentAllocHealthRequest
+
+	// An optional field to update the status of a deployment
+	DeploymentUpdate *DeploymentStatusUpdate
+
+	// Job is used to optionally upsert a job. This is used when setting the
+	// allocation health results in a deployment failure and the deployment
+	// auto-reverts to the latest stable job.
+	Job *Job
+
+	// An optional evaluation to create after promoting the canaries
+	Eval *Evaluation
+}
+
+// DeploymentPromoteRequest is used to promote task groups in a deployment
+type DeploymentPromoteRequest struct {
+	DeploymentID string
+
+	// All is to promote all task groups
+	All bool
+
+	// Groups is used to set the promotion status per task group
+	Groups []string
+
+	WriteRequest
+}
+
+// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
+type ApplyDeploymentPromoteRequest struct {
+	DeploymentPromoteRequest
+
+	// An optional evaluation to create after promoting the canaries
+	Eval *Evaluation
+}
+
+// DeploymentPauseRequest is used to pause a deployment
+type DeploymentPauseRequest struct {
+	DeploymentID string
+
+	// Pause sets the pause status
+	Pause bool
+
+	WriteRequest
+}
+
+// DeploymentSpecificRequest is used to make a request specific to a particular
+// deployment
+type DeploymentSpecificRequest struct {
+	DeploymentID string
+	QueryOptions
+}
+
+// DeploymentFailRequest is used to fail a particular deployment
+type DeploymentFailRequest struct {
+	DeploymentID string
+	WriteRequest
+}
+
+// SingleDeploymentResponse is used to respond with a single deployment
+type SingleDeploymentResponse struct {
+	Deployment *Deployment
+	QueryMeta
+}
+
+// GenericResponse is used to respond to a request where no
+// specific response information is needed.
+type GenericResponse struct {
+	WriteMeta
+}
+
+// VersionResponse is used for the Status.Version response
+type VersionResponse struct {
+	Build    string
+	Versions map[string]int
+	QueryMeta
+}
+
+// JobRegisterResponse is used to respond to a job registration
+type JobRegisterResponse struct {
+	EvalID          string
+	EvalCreateIndex uint64
+	JobModifyIndex  uint64
+
+	// Warnings contains any warnings about the given job. These may include
+	// deprecation warnings.
+ Warnings string
+
+ QueryMeta
+}
+
+// JobDeregisterResponse is used to respond to a job deregistration
+type JobDeregisterResponse struct {
+ EvalID string
+ EvalCreateIndex uint64
+ JobModifyIndex uint64
+ QueryMeta
+}
+
+// JobValidateResponse is the response from a validate request
+type JobValidateResponse struct {
+ // DriverConfigValidated indicates whether the agent validated the driver
+ // config
+ DriverConfigValidated bool
+
+ // ValidationErrors is a list of validation errors
+ ValidationErrors []string
+
+ // Error is a string version of any error that may have occurred
+ Error string
+
+ // Warnings contains any warnings about the given job. These may include
+ // deprecation warnings.
+ Warnings string
+}
+
+// NodeUpdateResponse is used to respond to a node update
+type NodeUpdateResponse struct {
+ HeartbeatTTL time.Duration
+ EvalIDs []string
+ EvalCreateIndex uint64
+ NodeModifyIndex uint64
+
+ // LeaderRPCAddr is the RPC address of the current Raft Leader. If
+ // empty, the current Nomad Server is in the minority of a partition.
+ LeaderRPCAddr string
+
+ // NumNodes is the number of Nomad nodes attached to this quorum of
+ // Nomad Servers at the time of the response. This value can
+ // fluctuate based on the health of the cluster between heartbeats.
+ NumNodes int32
+
+ // Servers is the full list of known Nomad servers in the local
+ // region.
+ Servers []*NodeServerInfo
+
+ QueryMeta
+}
+
+// NodeDrainUpdateResponse is used to respond to a node drain update
+type NodeDrainUpdateResponse struct {
+ EvalIDs []string
+ EvalCreateIndex uint64
+ NodeModifyIndex uint64
+ QueryMeta
+}
+
+// NodeAllocsResponse is used to return allocs for a single node
+type NodeAllocsResponse struct {
+ Allocs []*Allocation
+ QueryMeta
+}
+
+// NodeClientAllocsResponse is used to return alloc metadata for a single node
+type NodeClientAllocsResponse struct {
+ Allocs map[string]uint64
+ QueryMeta
+}
+
+// SingleNodeResponse is used to return a single node
+type SingleNodeResponse struct {
+ Node *Node
+ QueryMeta
+}
+
+// NodeListResponse is used for a list request
+type NodeListResponse struct {
+ Nodes []*NodeListStub
+ QueryMeta
+}
+
+// SingleJobResponse is used to return a single job
+type SingleJobResponse struct {
+ Job *Job
+ QueryMeta
+}
+
+// JobSummaryResponse is used to return a single job summary
+type JobSummaryResponse struct {
+ JobSummary *JobSummary
+ QueryMeta
+}
+
+type JobDispatchResponse struct {
+ DispatchedJobID string
+ EvalID string
+ EvalCreateIndex uint64
+ JobCreateIndex uint64
+ WriteMeta
+}
+
+// JobListResponse is used for a list request
+type JobListResponse struct {
+ Jobs []*JobListStub
+ QueryMeta
+}
+
+// JobVersionsRequest is used to get a job's versions
+type JobVersionsRequest struct {
+ JobID string
+ Diffs bool
+ QueryOptions
+}
+
+// JobVersionsResponse is used for a job get versions request
+type JobVersionsResponse struct {
+ Versions []*Job
+ Diffs []*JobDiff
+ QueryMeta
+}
+
+// JobPlanResponse is used to respond to a job plan request
+type JobPlanResponse struct {
+ // Annotations stores annotations explaining decisions the scheduler made.
+ Annotations *PlanAnnotations
+
+ // FailedTGAllocs is the placement failures per task group.
+ FailedTGAllocs map[string]*AllocMetric
+
+ // JobModifyIndex is the modification index of the job. The value can be
+ // used when running `nomad run` to ensure that the Job wasn't modified
+ // since the last plan. If the job is being created, the value is zero.
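+ // (For example, `nomad plan` prints this index and a subsequent
+ // `nomad run -check-index <index> job.nomad` refuses to apply if the job
+ // changed in between; a sketch of the documented workflow.)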
+ JobModifyIndex uint64 + + // CreatedEvals is the set of evaluations created by the scheduler. The + // reasons for this can be rolling-updates or blocked evals. + CreatedEvals []*Evaluation + + // Diff contains the diff of the job and annotations on whether the change + // causes an in-place update or create/destroy + Diff *JobDiff + + // NextPeriodicLaunch is the time duration till the job would be launched if + // submitted. + NextPeriodicLaunch time.Time + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string + + WriteMeta +} + +// SingleAllocResponse is used to return a single allocation +type SingleAllocResponse struct { + Alloc *Allocation + QueryMeta +} + +// AllocsGetResponse is used to return a set of allocations +type AllocsGetResponse struct { + Allocs []*Allocation + QueryMeta +} + +// JobAllocationsResponse is used to return the allocations for a job +type JobAllocationsResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// JobEvaluationsResponse is used to return the evaluations for a job +type JobEvaluationsResponse struct { + Evaluations []*Evaluation + QueryMeta +} + +// SingleEvalResponse is used to return a single evaluation +type SingleEvalResponse struct { + Eval *Evaluation + QueryMeta +} + +// EvalDequeueResponse is used to return from a dequeue +type EvalDequeueResponse struct { + Eval *Evaluation + Token string + + // WaitIndex is the Raft index the worker should wait until invoking the + // scheduler. + WaitIndex uint64 + + QueryMeta +} + +// GetWaitIndex is used to retrieve the Raft index in which state should be at +// or beyond before invoking the scheduler. +func (e *EvalDequeueResponse) GetWaitIndex() uint64 { + // Prefer the wait index sent. This will be populated on all responses from + // 0.7.0 and above + if e.WaitIndex != 0 { + return e.WaitIndex + } else if e.Eval != nil { + return e.Eval.ModifyIndex + } + + // This should never happen + return 1 +} + +// PlanResponse is used to return from a PlanRequest +type PlanResponse struct { + Result *PlanResult + WriteMeta +} + +// AllocListResponse is used for a list request +type AllocListResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// DeploymentListResponse is used for a list request +type DeploymentListResponse struct { + Deployments []*Deployment + QueryMeta +} + +// EvalListResponse is used for a list request +type EvalListResponse struct { + Evaluations []*Evaluation + QueryMeta +} + +// EvalAllocationsResponse is used to return the allocations for an evaluation +type EvalAllocationsResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// PeriodicForceResponse is used to respond to a periodic job force launch +type PeriodicForceResponse struct { + EvalID string + EvalCreateIndex uint64 + WriteMeta +} + +// DeploymentUpdateResponse is used to respond to a deployment change. The +// response will include the modify index of the deployment as well as details +// of any triggered evaluation. +type DeploymentUpdateResponse struct { + EvalID string + EvalCreateIndex uint64 + DeploymentModifyIndex uint64 + + // RevertedJobVersion is the version the job was reverted to. If unset, the + // job wasn't reverted + RevertedJobVersion *uint64 + + WriteMeta +} + +const ( + NodeStatusInit = "initializing" + NodeStatusReady = "ready" + NodeStatusDown = "down" +) + +// ShouldDrainNode checks if a given node status should trigger an +// evaluation. Some states don't require any further action. 
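+//
+// For example (sketch):
+//
+//	ShouldDrainNode(NodeStatusDown)  // => true: allocs must be rescheduled
+//	ShouldDrainNode(NodeStatusReady) // => false: nothing to do
+//	ShouldDrainNode("bogus")         // panics on an unhandled status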
+func ShouldDrainNode(status string) bool {
+ switch status {
+ case NodeStatusInit, NodeStatusReady:
+ return false
+ case NodeStatusDown:
+ return true
+ default:
+ panic(fmt.Sprintf("unhandled node status %s", status))
+ }
+}
+
+// ValidNodeStatus is used to check if a node status is valid
+func ValidNodeStatus(status string) bool {
+ switch status {
+ case NodeStatusInit, NodeStatusReady, NodeStatusDown:
+ return true
+ default:
+ return false
+ }
+}
+
+// Node is a representation of a schedulable client node
+type Node struct {
+ // ID is a unique identifier for the node. It can be constructed
+ // by doing a concatenation of the Name and Datacenter as a simple
+ // approach. Alternatively a UUID may be used.
+ ID string
+
+ // SecretID is an ID that is only known by the Node and the set of Servers.
+ // It is not accessible via the API and is used to authenticate nodes
+ // conducting privileged activities.
+ SecretID string
+
+ // Datacenter for this node
+ Datacenter string
+
+ // Node name
+ Name string
+
+ // HTTPAddr is the address on which the Nomad client is listening for http
+ // requests
+ HTTPAddr string
+
+ // TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
+ TLSEnabled bool
+
+ // Attributes is an arbitrary set of key/value
+ // data that can be used for constraints. Examples
+ // include "kernel.name=linux", "arch=386", "driver.docker=1",
+ // "docker.runtime=1.8.3"
+ Attributes map[string]string
+
+ // Resources is the available resources on the client.
+ // For example 'cpu=2' 'memory=2048'
+ Resources *Resources
+
+ // Reserved is the set of resources that are reserved,
+ // and should be subtracted from the total resources for
+ // the purposes of scheduling. This may provide certain
+ // high-watermark tolerances or account for external schedulers
+ // consuming resources.
+ Reserved *Resources
+
+ // Links are used to 'link' this client to external
+ // systems. For example 'consul=foo.dc1' 'aws=i-83212'
+ // 'ami=ami-123'
+ Links map[string]string
+
+ // Meta is used to associate arbitrary metadata with this
+ // client. This is opaque to Nomad.
+ Meta map[string]string
+
+ // NodeClass is an opaque identifier used to group nodes
+ // together for the purpose of determining scheduling pressure.
+ NodeClass string
+
+ // ComputedClass is a unique id that identifies nodes with a common set of
+ // attributes and capabilities.
+ ComputedClass string
+
+ // Drain is controlled by the servers, and not the client.
+ // If true, no jobs will be scheduled to this node, and existing
+ // allocations will be drained.
+ Drain bool
+
+ // Status of this node
+ Status string
+
+ // StatusDescription is meant to provide more human-readable information
+ StatusDescription string
+
+ // StatusUpdatedAt is the time stamp at which the state of the node was
+ // updated
+ StatusUpdatedAt int64
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// Ready returns if the node is ready for running allocations
+func (n *Node) Ready() bool {
+ return n.Status == NodeStatusReady && !n.Drain
+}
+
+func (n *Node) Copy() *Node {
+ if n == nil {
+ return nil
+ }
+ nn := new(Node)
+ *nn = *n
+ nn.Attributes = helper.CopyMapStringString(nn.Attributes)
+ nn.Resources = nn.Resources.Copy()
+ nn.Reserved = nn.Reserved.Copy()
+ nn.Links = helper.CopyMapStringString(nn.Links)
+ nn.Meta = helper.CopyMapStringString(nn.Meta)
+ return nn
+}
+
+// TerminalStatus returns if the current status is terminal and
+// will no longer transition.
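+//
+// For example (sketch):
+//
+//	(&Node{Status: NodeStatusDown}).TerminalStatus()  // => true
+//	(&Node{Status: NodeStatusReady}).TerminalStatus() // => false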
+func (n *Node) TerminalStatus() bool { + switch n.Status { + case NodeStatusDown: + return true + default: + return false + } +} + +// Stub returns a summarized version of the node +func (n *Node) Stub() *NodeListStub { + return &NodeListStub{ + ID: n.ID, + Datacenter: n.Datacenter, + Name: n.Name, + NodeClass: n.NodeClass, + Version: n.Attributes["nomad.version"], + Drain: n.Drain, + Status: n.Status, + StatusDescription: n.StatusDescription, + CreateIndex: n.CreateIndex, + ModifyIndex: n.ModifyIndex, + } +} + +// NodeListStub is used to return a subset of job information +// for the job list +type NodeListStub struct { + ID string + Datacenter string + Name string + NodeClass string + Version string + Drain bool + Status string + StatusDescription string + CreateIndex uint64 + ModifyIndex uint64 +} + +// Networks defined for a task on the Resources struct. +type Networks []*NetworkResource + +// Port assignment and IP for the given label or empty values. +func (ns Networks) Port(label string) (string, int) { + for _, n := range ns { + for _, p := range n.ReservedPorts { + if p.Label == label { + return n.IP, p.Value + } + } + for _, p := range n.DynamicPorts { + if p.Label == label { + return n.IP, p.Value + } + } + } + return "", 0 +} + +// Resources is used to define the resources available +// on a client +type Resources struct { + CPU int + MemoryMB int + DiskMB int + IOPS int + Networks Networks +} + +const ( + BytesInMegabyte = 1024 * 1024 +) + +// DefaultResources returns the default resources for a task. +func DefaultResources() *Resources { + return &Resources{ + CPU: 100, + MemoryMB: 10, + IOPS: 0, + } +} + +// DiskInBytes returns the amount of disk resources in bytes. +func (r *Resources) DiskInBytes() int64 { + return int64(r.DiskMB * BytesInMegabyte) +} + +// Merge merges this resource with another resource. +func (r *Resources) Merge(other *Resources) { + if other.CPU != 0 { + r.CPU = other.CPU + } + if other.MemoryMB != 0 { + r.MemoryMB = other.MemoryMB + } + if other.DiskMB != 0 { + r.DiskMB = other.DiskMB + } + if other.IOPS != 0 { + r.IOPS = other.IOPS + } + if len(other.Networks) != 0 { + r.Networks = other.Networks + } +} + +func (r *Resources) Canonicalize() { + // Ensure that an empty and nil slices are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(r.Networks) == 0 { + r.Networks = nil + } + + for _, n := range r.Networks { + n.Canonicalize() + } +} + +// MeetsMinResources returns an error if the resources specified are less than +// the minimum allowed. 
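+//
+// A sketch of the behavior, with the minimums taken from the checks below
+// (20 CPU, 10 MB of memory, non-negative IOPS):
+//
+//	r := &Resources{CPU: 10, MemoryMB: 5}
+//	err := r.MeetsMinResources() // reports both the CPU and MemoryMB violations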
+func (r *Resources) MeetsMinResources() error { + var mErr multierror.Error + if r.CPU < 20 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is 20; got %d", r.CPU)) + } + if r.MemoryMB < 10 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is 10; got %d", r.MemoryMB)) + } + if r.IOPS < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is 0; got %d", r.IOPS)) + } + for i, n := range r.Networks { + if err := n.MeetsMinResources(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err)) + } + } + + return mErr.ErrorOrNil() +} + +// Copy returns a deep copy of the resources +func (r *Resources) Copy() *Resources { + if r == nil { + return nil + } + newR := new(Resources) + *newR = *r + if r.Networks != nil { + n := len(r.Networks) + newR.Networks = make([]*NetworkResource, n) + for i := 0; i < n; i++ { + newR.Networks[i] = r.Networks[i].Copy() + } + } + return newR +} + +// NetIndex finds the matching net index using device name +func (r *Resources) NetIndex(n *NetworkResource) int { + for idx, net := range r.Networks { + if net.Device == n.Device { + return idx + } + } + return -1 +} + +// Superset checks if one set of resources is a superset +// of another. This ignores network resources, and the NetworkIndex +// should be used for that. +func (r *Resources) Superset(other *Resources) (bool, string) { + if r.CPU < other.CPU { + return false, "cpu exhausted" + } + if r.MemoryMB < other.MemoryMB { + return false, "memory exhausted" + } + if r.DiskMB < other.DiskMB { + return false, "disk exhausted" + } + if r.IOPS < other.IOPS { + return false, "iops exhausted" + } + return true, "" +} + +// Add adds the resources of the delta to this, potentially +// returning an error if not possible. +func (r *Resources) Add(delta *Resources) error { + if delta == nil { + return nil + } + r.CPU += delta.CPU + r.MemoryMB += delta.MemoryMB + r.DiskMB += delta.DiskMB + r.IOPS += delta.IOPS + + for _, n := range delta.Networks { + // Find the matching interface by IP or CIDR + idx := r.NetIndex(n) + if idx == -1 { + r.Networks = append(r.Networks, n.Copy()) + } else { + r.Networks[idx].Add(n) + } + } + return nil +} + +func (r *Resources) GoString() string { + return fmt.Sprintf("*%#v", *r) +} + +type Port struct { + Label string + Value int +} + +// NetworkResource is used to represent available network +// resources +type NetworkResource struct { + Device string // Name of the device + CIDR string // CIDR block of addresses + IP string // Host IP address + MBits int // Throughput + ReservedPorts []Port // Host Reserved ports + DynamicPorts []Port // Host Dynamically assigned ports +} + +func (n *NetworkResource) Canonicalize() { + // Ensure that an empty and nil slices are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(n.ReservedPorts) == 0 { + n.ReservedPorts = nil + } + if len(n.DynamicPorts) == 0 { + n.DynamicPorts = nil + } +} + +// MeetsMinResources returns an error if the resources specified are less than +// the minimum allowed. 
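+//
+// For example (sketch): a NetworkResource with MBits: 0 fails, since the only
+// minimum enforced below is at least 1 MBit of bandwidth.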
+func (n *NetworkResource) MeetsMinResources() error {
+ var mErr multierror.Error
+ if n.MBits < 1 {
+ mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
+ }
+ return mErr.ErrorOrNil()
+}
+
+// Copy returns a deep copy of the network resource
+func (n *NetworkResource) Copy() *NetworkResource {
+ if n == nil {
+ return nil
+ }
+ newR := new(NetworkResource)
+ *newR = *n
+ if n.ReservedPorts != nil {
+ newR.ReservedPorts = make([]Port, len(n.ReservedPorts))
+ copy(newR.ReservedPorts, n.ReservedPorts)
+ }
+ if n.DynamicPorts != nil {
+ newR.DynamicPorts = make([]Port, len(n.DynamicPorts))
+ copy(newR.DynamicPorts, n.DynamicPorts)
+ }
+ return newR
+}
+
+// Add adds the resources of the delta to this, potentially
+// returning an error if not possible.
+func (n *NetworkResource) Add(delta *NetworkResource) {
+ if len(delta.ReservedPorts) > 0 {
+ n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
+ }
+ n.MBits += delta.MBits
+ n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
+}
+
+func (n *NetworkResource) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+// PortLabels returns a map of port labels to their assigned host ports.
+func (n *NetworkResource) PortLabels() map[string]int {
+ num := len(n.ReservedPorts) + len(n.DynamicPorts)
+ labelValues := make(map[string]int, num)
+ for _, port := range n.ReservedPorts {
+ labelValues[port.Label] = port.Value
+ }
+ for _, port := range n.DynamicPorts {
+ labelValues[port.Label] = port.Value
+ }
+ return labelValues
+}
+
+const (
+ // JobTypeCore is reserved for internal system tasks and is
+ // always handled by the CoreScheduler.
+ JobTypeCore = "_core"
+ JobTypeService = "service"
+ JobTypeBatch = "batch"
+ JobTypeSystem = "system"
+)
+
+const (
+ JobStatusPending = "pending" // Pending means the job is waiting on scheduling
+ JobStatusRunning = "running" // Running means the job has non-terminal allocations
+ JobStatusDead = "dead" // Dead means all evaluations and allocations are terminal
+)
+
+const (
+ // JobMinPriority is the minimum allowed priority
+ JobMinPriority = 1
+
+ // JobDefaultPriority is the default priority if not
+ // specified.
+ JobDefaultPriority = 50
+
+ // JobMaxPriority is the maximum allowed priority
+ JobMaxPriority = 100
+
+ // Ensure CoreJobPriority is higher than any user
+ // specified job so that it gets priority. This is important
+ // for the system to remain healthy.
+ CoreJobPriority = JobMaxPriority * 2
+
+ // JobTrackedVersions is the number of historic job versions that are
+ // kept.
+ JobTrackedVersions = 6
+)
+
+// Job is the scope of a scheduling request to Nomad. It is the largest
+// scoped object, and is a named collection of task groups. Each task group
+// is further composed of tasks. A task group (TG) is, however, the unit of
+// scheduling.
+type Job struct {
+ // Stop marks whether the user has stopped the job. A stopped job will
+ // have all created allocations stopped and acts as a way to stop a job
+ // without purging it from the system. This allows existing allocs to be
+ // queried and the job to be inspected as it is being killed.
+ Stop bool
+
+ // Region is the Nomad region that handles scheduling this job
+ Region string
+
+ // Namespace is the namespace the job is submitted into.
+ Namespace string
+
+ // ID is a unique identifier for the job per region. It can be
+ // specified hierarchically like LineOfBiz/OrgName/Team/Project
+ ID string
+
+ // ParentID is the unique identifier of the job that spawned this job.
+ ParentID string
+
+ // Name is the logical name of the job used to refer to it. This is unique
+ // per region, but not unique globally.
+ Name string
+
+ // Type is used to control various behaviors about the job. Most jobs
+ // are service jobs, meaning they are expected to be long lived.
+ // Some jobs are batch oriented, meaning they run and then terminate.
+ // This can be extended in the future to support custom schedulers.
+ Type string
+
+ // Priority is used to control scheduling importance and if this job
+ // can preempt other jobs.
+ Priority int
+
+ // AllAtOnce is used to control if incremental scheduling of task groups
+ // is allowed or if we must do a gang scheduling of the entire job. This
+ // can slow down larger jobs if resources are not available.
+ AllAtOnce bool
+
+ // Datacenters contains all the datacenters this job is allowed to span
+ Datacenters []string
+
+ // Constraints can be specified at a job level and apply to
+ // all the task groups and tasks.
+ Constraints []*Constraint
+
+ // TaskGroups are the collections of task groups that this job needs
+ // to run. Each task group is an atomic unit of scheduling and placement.
+ TaskGroups []*TaskGroup
+
+ // COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
+ Update UpdateStrategy
+
+ // Periodic is used to define the interval the job is run at.
+ Periodic *PeriodicConfig
+
+ // ParameterizedJob is used to specify the job as a parameterized job
+ // for dispatching.
+ ParameterizedJob *ParameterizedJobConfig
+
+ // Payload is the payload supplied when the job was dispatched.
+ Payload []byte
+
+ // Meta is used to associate arbitrary metadata with this
+ // job. This is opaque to Nomad.
+ Meta map[string]string
+
+ // VaultToken is the Vault token that proves the submitter of the job has
+ // access to the specified Vault policies. This field is only used to
+ // transfer the token and is not stored after Job submission.
+ VaultToken string
+
+ // Job status
+ Status string
+
+ // StatusDescription is meant to provide more human-readable information
+ StatusDescription string
+
+ // Stable marks a job as stable. Stability is only defined on "service" and
+ // "system" jobs. The stability of a job will be set automatically as part
+ // of a deployment and can be manually set via APIs.
+ Stable bool
+
+ // Version is a monotonically increasing version number that is incremented
+ // on each job register.
+ Version uint64
+
+ // SubmitTime is the time at which the job was submitted as a UnixNano in
+ // UTC
+ SubmitTime int64
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+ JobModifyIndex uint64
+}
+
+// Canonicalize is used to canonicalize fields in the Job. This should be called
+// when registering a Job. A set of warnings is returned if the job was changed
+// in any way that the user should be made aware of.
+func (j *Job) Canonicalize() (warnings error) {
+ if j == nil {
+ return nil
+ }
+
+ var mErr multierror.Error
+ // Ensure that an empty and nil map are treated the same to avoid scheduling
+ // problems since we use reflect DeepEquals.
+ if len(j.Meta) == 0 {
+ j.Meta = nil
+ }
+
+ // Ensure the job is in a namespace.
+ if j.Namespace == "" {
+ j.Namespace = DefaultNamespace
+ }
+
+ for _, tg := range j.TaskGroups {
+ tg.Canonicalize(j)
+ }
+
+ if j.ParameterizedJob != nil {
+ j.ParameterizedJob.Canonicalize()
+ }
+
+ if j.Periodic != nil {
+ j.Periodic.Canonicalize()
+ }
+
+ // COMPAT: Remove in 0.7.0
+ // Rewrite any job that has an update block with pre 0.6.0 syntax.
+ jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0
+ if jobHasOldUpdate && j.Type != JobTypeBatch {
+ // Build an appropriate update block and copy it down to each task group
+ base := DefaultUpdateStrategy.Copy()
+ base.MaxParallel = j.Update.MaxParallel
+ base.MinHealthyTime = j.Update.Stagger
+
+ // Add to each task group, modifying as needed
+ upgraded := false
+ l := len(j.TaskGroups)
+ for _, tg := range j.TaskGroups {
+ // The task group doesn't need upgrading if it has an update block with the new syntax
+ u := tg.Update
+ if u != nil && u.Stagger > 0 && u.MaxParallel > 0 &&
+ u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 {
+ continue
+ }
+
+ upgraded = true
+
+ // The MaxParallel for the job should be 10% of the total count,
+ // unless there is just one task group, in which case the old
+ // max parallel carries over as the new value
+ tgu := base.Copy()
+ if l != 1 {
+ // Round to the nearest whole allocation at 10%
+ percent := float64(tg.Count) * 0.1
+ tgu.MaxParallel = int(percent + 0.5)
+ }
+
+ // Safety guards
+ if tgu.MaxParallel == 0 {
+ tgu.MaxParallel = 1
+ } else if tgu.MaxParallel > tg.Count {
+ tgu.MaxParallel = tg.Count
+ }
+
+ tg.Update = tgu
+ }
+
+ if upgraded {
+ w := "A best-effort conversion to the new update stanza introduced in v0.6.0 was applied. " +
+ "Please update the update stanza before v0.7.0."
+ multierror.Append(&mErr, fmt.Errorf(w))
+ }
+ }
+
+ // Ensure that the batch job doesn't have a new style or old style update
+ // stanza. Unfortunately we scan here because we have to deprecate over a
+ // release, so we can't check in the task group since that may be new style
+ // but wouldn't capture the old style, and we don't want duplicate warnings.
+ if j.Type == JobTypeBatch {
+ displayWarning := jobHasOldUpdate
+ j.Update.Stagger = 0
+ j.Update.MaxParallel = 0
+ j.Update.HealthCheck = ""
+ j.Update.MinHealthyTime = 0
+ j.Update.HealthyDeadline = 0
+ j.Update.AutoRevert = false
+ j.Update.Canary = 0
+
+ // Remove any update spec from the task groups
+ for _, tg := range j.TaskGroups {
+ if tg.Update != nil {
+ displayWarning = true
+ tg.Update = nil
+ }
+ }
+
+ if displayWarning {
+ w := "The update stanza is disallowed for batch jobs since v0.6.0. " +
+ "The update block has automatically been removed."
+ multierror.Append(&mErr, fmt.Errorf(w))
+ }
+ }
+
+ return mErr.ErrorOrNil()
+}
+
+// Copy returns a deep copy of the Job. It is expected that callers use recover:
+// this method can panic if the deep copy fails, as it uses reflection.
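+//
+// A guarded call might look like (sketch):
+//
+//	defer func() {
+//		if r := recover(); r != nil {
+//			// the deep copy failed; treat as an error
+//		}
+//	}()
+//	dup := job.Copy()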
+func (j *Job) Copy() *Job { + if j == nil { + return nil + } + nj := new(Job) + *nj = *j + nj.Datacenters = helper.CopySliceString(nj.Datacenters) + nj.Constraints = CopySliceConstraints(nj.Constraints) + + if j.TaskGroups != nil { + tgs := make([]*TaskGroup, len(nj.TaskGroups)) + for i, tg := range nj.TaskGroups { + tgs[i] = tg.Copy() + } + nj.TaskGroups = tgs + } + + nj.Periodic = nj.Periodic.Copy() + nj.Meta = helper.CopyMapStringString(nj.Meta) + nj.ParameterizedJob = nj.ParameterizedJob.Copy() + return nj +} + +// Validate is used to sanity check a job input +func (j *Job) Validate() error { + var mErr multierror.Error + + if j.Region == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) + } + if j.ID == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job ID")) + } else if strings.Contains(j.ID, " ") { + mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space")) + } + if j.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job name")) + } + if j.Namespace == "" { + mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace")) + } + switch j.Type { + case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem: + case "": + mErr.Errors = append(mErr.Errors, errors.New("Missing job type")) + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type)) + } + if j.Priority < JobMinPriority || j.Priority > JobMaxPriority { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority)) + } + if len(j.Datacenters) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters")) + } + if len(j.TaskGroups) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups")) + } + for idx, constr := range j.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + // Check for duplicate task groups + taskGroups := make(map[string]int) + for idx, tg := range j.TaskGroups { + if tg.Name == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1)) + } else if existing, ok := taskGroups[tg.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1)) + } else { + taskGroups[tg.Name] = idx + } + + if j.Type == "system" && tg.Count > 1 { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler", + tg.Name, tg.Count)) + } + } + + // Validate the task group + for _, tg := range j.TaskGroups { + if err := tg.Validate(j); err != nil { + outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + // Validate periodic is only used with batch jobs. 
+ if j.IsPeriodic() && j.Periodic.Enabled { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.Periodic.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + if j.IsParameterized() { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.ParameterizedJob.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + return mErr.ErrorOrNil() +} + +// Warnings returns a list of warnings that may be from dubious settings or +// deprecation warnings. +func (j *Job) Warnings() error { + var mErr multierror.Error + + // Check the groups + for _, tg := range j.TaskGroups { + if err := tg.Warnings(j); err != nil { + outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + return mErr.ErrorOrNil() +} + +// LookupTaskGroup finds a task group by name +func (j *Job) LookupTaskGroup(name string) *TaskGroup { + for _, tg := range j.TaskGroups { + if tg.Name == name { + return tg + } + } + return nil +} + +// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined +// meta data for the task. When joining Job, Group and Task Meta, the precedence +// is by deepest scope (Task > Group > Job). +func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string { + group := j.LookupTaskGroup(groupName) + if group == nil { + return nil + } + + task := group.LookupTask(taskName) + if task == nil { + return nil + } + + meta := helper.CopyMapStringString(task.Meta) + if meta == nil { + meta = make(map[string]string, len(group.Meta)+len(j.Meta)) + } + + // Add the group specific meta + for k, v := range group.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + // Add the job specific meta + for k, v := range j.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + return meta +} + +// Stopped returns if a job is stopped. +func (j *Job) Stopped() bool { + return j == nil || j.Stop +} + +// HasUpdateStrategy returns if any task group in the job has an update strategy +func (j *Job) HasUpdateStrategy() bool { + for _, tg := range j.TaskGroups { + if tg.Update != nil { + return true + } + } + + return false +} + +// Stub is used to return a summary of the job +func (j *Job) Stub(summary *JobSummary) *JobListStub { + return &JobListStub{ + ID: j.ID, + ParentID: j.ParentID, + Name: j.Name, + Type: j.Type, + Priority: j.Priority, + Periodic: j.IsPeriodic(), + ParameterizedJob: j.IsParameterized(), + Stop: j.Stop, + Status: j.Status, + StatusDescription: j.StatusDescription, + CreateIndex: j.CreateIndex, + ModifyIndex: j.ModifyIndex, + JobModifyIndex: j.JobModifyIndex, + SubmitTime: j.SubmitTime, + JobSummary: summary, + } +} + +// IsPeriodic returns whether a job is periodic. +func (j *Job) IsPeriodic() bool { + return j.Periodic != nil +} + +// IsParameterized returns whether a job is parameterized job. 
+func (j *Job) IsParameterized() bool { + return j.ParameterizedJob != nil +} + +// VaultPolicies returns the set of Vault policies per task group, per task +func (j *Job) VaultPolicies() map[string]map[string]*Vault { + policies := make(map[string]map[string]*Vault, len(j.TaskGroups)) + + for _, tg := range j.TaskGroups { + tgPolicies := make(map[string]*Vault, len(tg.Tasks)) + + for _, task := range tg.Tasks { + if task.Vault == nil { + continue + } + + tgPolicies[task.Name] = task.Vault + } + + if len(tgPolicies) != 0 { + policies[tg.Name] = tgPolicies + } + } + + return policies +} + +// RequiredSignals returns a mapping of task groups to tasks to their required +// set of signals +func (j *Job) RequiredSignals() map[string]map[string][]string { + signals := make(map[string]map[string][]string) + + for _, tg := range j.TaskGroups { + for _, task := range tg.Tasks { + // Use this local one as a set + taskSignals := make(map[string]struct{}) + + // Check if the Vault change mode uses signals + if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal { + taskSignals[task.Vault.ChangeSignal] = struct{}{} + } + + // Check if any template change mode uses signals + for _, t := range task.Templates { + if t.ChangeMode != TemplateChangeModeSignal { + continue + } + + taskSignals[t.ChangeSignal] = struct{}{} + } + + // Flatten and sort the signals + l := len(taskSignals) + if l == 0 { + continue + } + + flat := make([]string, 0, l) + for sig := range taskSignals { + flat = append(flat, sig) + } + + sort.Strings(flat) + tgSignals, ok := signals[tg.Name] + if !ok { + tgSignals = make(map[string][]string) + signals[tg.Name] = tgSignals + } + tgSignals[task.Name] = flat + } + + } + + return signals +} + +// SpecChanged determines if the functional specification has changed between +// two job versions. +func (j *Job) SpecChanged(new *Job) bool { + if j == nil { + return new != nil + } + + // Create a copy of the new job + c := new.Copy() + + // Update the new job so we can do a reflect + c.Status = j.Status + c.StatusDescription = j.StatusDescription + c.Stable = j.Stable + c.Version = j.Version + c.CreateIndex = j.CreateIndex + c.ModifyIndex = j.ModifyIndex + c.JobModifyIndex = j.JobModifyIndex + c.SubmitTime = j.SubmitTime + + // Deep equals the jobs + return !reflect.DeepEqual(j, c) +} + +func (j *Job) SetSubmitTime() { + j.SubmitTime = time.Now().UTC().UnixNano() +} + +// JobListStub is used to return a subset of job information +// for the job list +type JobListStub struct { + ID string + ParentID string + Name string + Type string + Priority int + Periodic bool + ParameterizedJob bool + Stop bool + Status string + StatusDescription string + JobSummary *JobSummary + CreateIndex uint64 + ModifyIndex uint64 + JobModifyIndex uint64 + SubmitTime int64 +} + +// JobSummary summarizes the state of the allocations of a job +type JobSummary struct { + // JobID is the ID of the job the summary is for + JobID string + + // Namespace is the namespace of the job and its summary + Namespace string + + // Summmary contains the summary per task group for the Job + Summary map[string]TaskGroupSummary + + // Children contains a summary for the children of this job. 
+ Children *JobChildrenSummary
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// Copy returns a new copy of JobSummary
+func (js *JobSummary) Copy() *JobSummary {
+ newJobSummary := new(JobSummary)
+ *newJobSummary = *js
+ newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
+ for k, v := range js.Summary {
+ newTGSummary[k] = v
+ }
+ newJobSummary.Summary = newTGSummary
+ newJobSummary.Children = newJobSummary.Children.Copy()
+ return newJobSummary
+}
+
+// JobChildrenSummary contains the summary of children job statuses
+type JobChildrenSummary struct {
+ Pending int64
+ Running int64
+ Dead int64
+}
+
+// Copy returns a new copy of a JobChildrenSummary
+func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
+ if jc == nil {
+ return nil
+ }
+
+ njc := new(JobChildrenSummary)
+ *njc = *jc
+ return njc
+}
+
+// TaskGroupSummary summarizes the state of all the allocations of a particular
+// TaskGroup
+type TaskGroupSummary struct {
+ Queued int
+ Complete int
+ Failed int
+ Running int
+ Starting int
+ Lost int
+}
+
+const (
+ // Checks uses any registered health check state in combination with task
+ // states to determine if an allocation is healthy.
+ UpdateStrategyHealthCheck_Checks = "checks"
+
+ // TaskStates uses the task states of an allocation to determine if the
+ // allocation is healthy.
+ UpdateStrategyHealthCheck_TaskStates = "task_states"
+
+ // Manual allows the operator to manually signal to Nomad when an
+ // allocation is healthy. This allows more advanced health checking that is
+ // outside of the scope of Nomad.
+ UpdateStrategyHealthCheck_Manual = "manual"
+)
+
+var (
+ // DefaultUpdateStrategy provides a baseline that can be used to upgrade
+ // jobs with the old policy or for populating field defaults.
+ DefaultUpdateStrategy = &UpdateStrategy{
+ Stagger: 30 * time.Second,
+ MaxParallel: 1,
+ HealthCheck: UpdateStrategyHealthCheck_Checks,
+ MinHealthyTime: 10 * time.Second,
+ HealthyDeadline: 5 * time.Minute,
+ AutoRevert: false,
+ Canary: 0,
+ }
+)
+
+// UpdateStrategy is used to modify how updates are done
+type UpdateStrategy struct {
+ // Stagger is used to determine the rate at which allocations are migrated
+ // due to down or draining nodes.
+ Stagger time.Duration
+
+ // MaxParallel is how many updates can be done in parallel
+ MaxParallel int
+
+ // HealthCheck specifies the mechanism in which allocations are marked
+ // healthy or unhealthy as part of a deployment.
+ HealthCheck string
+
+ // MinHealthyTime is the minimum time an allocation must be in the healthy
+ // state before it is marked as healthy, unblocking more allocations to be
+ // rolled.
+ MinHealthyTime time.Duration
+
+ // HealthyDeadline is the time in which an allocation must be marked as
+ // healthy before it is automatically transitioned to unhealthy. This time
+ // period doesn't count against the MinHealthyTime.
+ HealthyDeadline time.Duration
+
+ // AutoRevert declares that if a deployment fails because of unhealthy
+ // allocations, there should be an attempt to auto-revert the job to a
+ // stable version.
+ AutoRevert bool
+
+ // Canary is the number of canaries to deploy when a change to the task
+ // group is detected.
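+ // (For example, Canary: 1 creates a single canary allocation when the
+ // task group changes; the deployment then waits for promotion, see
+ // DeploymentPromoteRequest above, before replacing the rest.)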
+ Canary int +} + +func (u *UpdateStrategy) Copy() *UpdateStrategy { + if u == nil { + return nil + } + + copy := new(UpdateStrategy) + *copy = *u + return copy +} + +func (u *UpdateStrategy) Validate() error { + if u == nil { + return nil + } + + var mErr multierror.Error + switch u.HealthCheck { + case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual: + default: + multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) + } + + if u.MaxParallel < 1 { + multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than one: %d < 1", u.MaxParallel)) + } + if u.Canary < 0 { + multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) + } + if u.MinHealthyTime < 0 { + multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) + } + if u.HealthyDeadline <= 0 { + multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) + } + if u.MinHealthyTime >= u.HealthyDeadline { + multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) + } + if u.Stagger <= 0 { + multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) + } + + return mErr.ErrorOrNil() +} + +// TODO(alexdadgar): Remove once no longer used by the scheduler. +// Rolling returns if a rolling strategy should be used +func (u *UpdateStrategy) Rolling() bool { + return u.Stagger > 0 && u.MaxParallel > 0 +} + +const ( + // PeriodicSpecCron is used for a cron spec. + PeriodicSpecCron = "cron" + + // PeriodicSpecTest is only used by unit tests. It is a sorted, comma + // separated list of unix timestamps at which to launch. + PeriodicSpecTest = "_internal_test" +) + +// Periodic defines the interval a job should be run at. +type PeriodicConfig struct { + // Enabled determines if the job should be run periodically. + Enabled bool + + // Spec specifies the interval the job should be run as. It is parsed based + // on the SpecType. + Spec string + + // SpecType defines the format of the spec. + SpecType string + + // ProhibitOverlap enforces that spawned jobs do not run in parallel. + ProhibitOverlap bool + + // TimeZone is the user specified string that determines the time zone to + // launch against. The time zones must be specified from IANA Time Zone + // database, such as "America/New_York". 
+ // Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ // Reference: https://www.iana.org/time-zones
+ TimeZone string
+
+ // location is the time zone to evaluate the launch time against
+ location *time.Location
+}
+
+func (p *PeriodicConfig) Copy() *PeriodicConfig {
+ if p == nil {
+ return nil
+ }
+ np := new(PeriodicConfig)
+ *np = *p
+ return np
+}
+
+func (p *PeriodicConfig) Validate() error {
+ if !p.Enabled {
+ return nil
+ }
+
+ var mErr multierror.Error
+ if p.Spec == "" {
+ multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
+ }
+
+ // Check if we got a valid time zone
+ if p.TimeZone != "" {
+ if _, err := time.LoadLocation(p.TimeZone); err != nil {
+ multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
+ }
+ }
+
+ switch p.SpecType {
+ case PeriodicSpecCron:
+ // Validate the cron spec
+ if _, err := cronexpr.Parse(p.Spec); err != nil {
+ multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
+ }
+ case PeriodicSpecTest:
+ // No-op
+ default:
+ multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
+ }
+
+ return mErr.ErrorOrNil()
+}
+
+func (p *PeriodicConfig) Canonicalize() {
+ // Load the location; fall back to UTC rather than storing a nil location
+ // when the time zone is invalid.
+ l, err := time.LoadLocation(p.TimeZone)
+ if err != nil {
+ p.location = time.UTC
+ return
+ }
+
+ p.location = l
+}
+
+// Next returns the closest time instant matching the spec that is after the
+// passed time. If no matching instance exists, the zero value of time.Time is
+// returned. The `time.Location` of the returned value matches that of the
+// passed time.
+func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
+ switch p.SpecType {
+ case PeriodicSpecCron:
+ if e, err := cronexpr.Parse(p.Spec); err == nil {
+ return e.Next(fromTime)
+ }
+ case PeriodicSpecTest:
+ split := strings.Split(p.Spec, ",")
+ if len(split) == 1 && split[0] == "" {
+ return time.Time{}
+ }
+
+ // Parse the times
+ times := make([]time.Time, len(split))
+ for i, s := range split {
+ unix, err := strconv.Atoi(s)
+ if err != nil {
+ return time.Time{}
+ }
+
+ times[i] = time.Unix(int64(unix), 0)
+ }
+
+ // Find the next match
+ for _, next := range times {
+ if fromTime.Before(next) {
+ return next
+ }
+ }
+ }
+
+ return time.Time{}
+}
+
+// GetLocation returns the location to use for determining the time zone to run
+// the periodic job against.
+func (p *PeriodicConfig) GetLocation() *time.Location {
+ // Jobs pre 0.5.5 will not have this
+ if p.location != nil {
+ return p.location
+ }
+
+ return time.UTC
+}
+
+const (
+ // PeriodicLaunchSuffix is the string appended to the periodic job's ID
+ // when launching derived instances of it.
+ PeriodicLaunchSuffix = "/periodic-"
+)
+
+// PeriodicLaunch tracks the last launch time of a periodic job.
+type PeriodicLaunch struct {
+ ID string // ID of the periodic job.
+ Namespace string // Namespace of the periodic job
+ Launch time.Time // The last launch time.
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+const (
+ DispatchPayloadForbidden = "forbidden"
+ DispatchPayloadOptional = "optional"
+ DispatchPayloadRequired = "required"
+
+ // DispatchLaunchSuffix is the string appended to the parameterized job's ID
+ // when dispatching instances of it.
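+ // (For example, dispatching parameterized job "backup" at Unix time
+ // 1505944439 yields an ID like "backup/dispatch-1505944439-1a2b3c4d";
+ // see DispatchedID below for the exact format.)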
+ DispatchLaunchSuffix = "/dispatch-"
+)
+
+// ParameterizedJobConfig is used to configure the parameterized job
+type ParameterizedJobConfig struct {
+ // Payload configures the payload requirements
+ Payload string
+
+ // MetaRequired is the set of metadata keys that must be specified by the
+ // dispatcher
+ MetaRequired []string
+
+ // MetaOptional is the set of metadata keys that may be specified by the
+ // dispatcher
+ MetaOptional []string
+}
+
+func (d *ParameterizedJobConfig) Validate() error {
+ var mErr multierror.Error
+ switch d.Payload {
+ case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
+ default:
+ multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
+ }
+
+ // Check that the meta configurations are disjoint sets
+ disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional)
+ if !disjoint {
+ multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
+ }
+
+ return mErr.ErrorOrNil()
+}
+
+func (d *ParameterizedJobConfig) Canonicalize() {
+ if d.Payload == "" {
+ d.Payload = DispatchPayloadOptional
+ }
+}
+
+func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
+ if d == nil {
+ return nil
+ }
+ nd := new(ParameterizedJobConfig)
+ *nd = *d
+ nd.MetaOptional = helper.CopySliceString(nd.MetaOptional)
+ nd.MetaRequired = helper.CopySliceString(nd.MetaRequired)
+ return nd
+}
+
+// DispatchedID returns an ID appropriate for a job dispatched against a
+// particular parameterized job
+func DispatchedID(templateID string, t time.Time) string {
+ u := GenerateUUID()[:8]
+ return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
+}
+
+// DispatchPayloadConfig configures how a task gets its input from a job dispatch
+type DispatchPayloadConfig struct {
+ // File specifies a relative path to where the input data should be written
+ File string
+}
+
+func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
+ if d == nil {
+ return nil
+ }
+ nd := new(DispatchPayloadConfig)
+ *nd = *d
+ return nd
+}
+
+func (d *DispatchPayloadConfig) Validate() error {
+ // Verify the destination doesn't escape
+ escaped, err := PathEscapesAllocDir("task/local/", d.File)
+ if err != nil {
+ return fmt.Errorf("invalid destination path: %v", err)
+ } else if escaped {
+ return fmt.Errorf("destination escapes allocation directory")
+ }
+
+ return nil
+}
+
+var (
+ defaultServiceJobRestartPolicy = RestartPolicy{
+ Delay: 15 * time.Second,
+ Attempts: 2,
+ Interval: 1 * time.Minute,
+ Mode: RestartPolicyModeDelay,
+ }
+ defaultBatchJobRestartPolicy = RestartPolicy{
+ Delay: 15 * time.Second,
+ Attempts: 15,
+ Interval: 7 * 24 * time.Hour,
+ Mode: RestartPolicyModeDelay,
+ }
+)
+
+const (
+ // RestartPolicyModeDelay causes an artificial delay till the next interval is
+ // reached when the specified attempts have been reached in the interval.
+ RestartPolicyModeDelay = "delay"
+
+ // RestartPolicyModeFail causes a job to fail if the specified number of
+ // attempts are reached within an interval.
+ RestartPolicyModeFail = "fail"
+
+ // RestartPolicyMinInterval is the minimum interval that is accepted for a
+ // restart policy.
+ RestartPolicyMinInterval = 5 * time.Second
+)
+
+// RestartPolicy configures how Tasks are restarted when they crash or fail.
+type RestartPolicy struct {
+ // Attempts is the number of restarts that will occur in an interval.
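+ // (Validate below rejects policies where Attempts*Delay exceeds
+ // Interval, since that many restarts could not fit in the interval.)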
+ Attempts int
+
+ // Interval is a duration in which we can limit the number of restarts
+ // within.
+ Interval time.Duration
+
+ // Delay is the time between a failure and a restart.
+ Delay time.Duration
+
+ // Mode controls what happens when the task restarts more than Attempts
+ // times in an interval.
+ Mode string
+}
+
+func (r *RestartPolicy) Copy() *RestartPolicy {
+ if r == nil {
+ return nil
+ }
+ nrp := new(RestartPolicy)
+ *nrp = *r
+ return nrp
+}
+
+func (r *RestartPolicy) Validate() error {
+ var mErr multierror.Error
+ switch r.Mode {
+ case RestartPolicyModeDelay, RestartPolicyModeFail:
+ default:
+ multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
+ }
+
+ // Check for ambiguous/confusing settings
+ if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
+ multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
+ }
+
+ if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() {
+ multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
+ }
+ if time.Duration(r.Attempts)*r.Delay > r.Interval {
+ multierror.Append(&mErr,
+ fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
+ }
+ return mErr.ErrorOrNil()
+}
+
+func NewRestartPolicy(jobType string) *RestartPolicy {
+ switch jobType {
+ case JobTypeService, JobTypeSystem:
+ rp := defaultServiceJobRestartPolicy
+ return &rp
+ case JobTypeBatch:
+ rp := defaultBatchJobRestartPolicy
+ return &rp
+ }
+ return nil
+}
+
+// TaskGroup is an atomic unit of placement. Each task group belongs to
+// a job and may contain any number of tasks. A task group supports running
+// many replicas using the same configuration.
+type TaskGroup struct {
+ // Name of the task group
+ Name string
+
+ // Count is the number of replicas of this task group that should
+ // be scheduled.
+ Count int
+
+ // Update is used to control the update strategy for this task group
+ Update *UpdateStrategy
+
+ // RestartPolicy of a TaskGroup
+ RestartPolicy *RestartPolicy
+
+ // Constraints can be specified at a task group level and apply to
+ // all the tasks contained.
+ Constraints []*Constraint
+
+ // Tasks are the collection of tasks that this task group needs to run
+ Tasks []*Task
+
+ // EphemeralDisk is the disk resources that the task group requests
+ EphemeralDisk *EphemeralDisk
+
+ // Meta is used to associate arbitrary metadata with this
+ // task group. This is opaque to Nomad.
+ Meta map[string]string
+}
+
+func (tg *TaskGroup) Copy() *TaskGroup {
+ if tg == nil {
+ return nil
+ }
+ ntg := new(TaskGroup)
+ *ntg = *tg
+ ntg.Update = ntg.Update.Copy()
+ ntg.Constraints = CopySliceConstraints(ntg.Constraints)
+ ntg.RestartPolicy = ntg.RestartPolicy.Copy()
+
+ if tg.Tasks != nil {
+ tasks := make([]*Task, len(ntg.Tasks))
+ for i, t := range ntg.Tasks {
+ tasks[i] = t.Copy()
+ }
+ ntg.Tasks = tasks
+ }
+
+ ntg.Meta = helper.CopyMapStringString(ntg.Meta)
+
+ if tg.EphemeralDisk != nil {
+ ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
+ }
+ return ntg
+}
+
+// Canonicalize is used to canonicalize fields in the TaskGroup.
+func (tg *TaskGroup) Canonicalize(job *Job) {
+ // Ensure that an empty and nil map are treated the same to avoid scheduling
+ // problems since we use reflect DeepEquals.
+ if len(tg.Meta) == 0 {
+ tg.Meta = nil
+ }
+
+ // Set the default restart policy.
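+ // (Per NewRestartPolicy above: service and system jobs default to 2
+ // attempts per minute, batch jobs to 15 attempts per week.)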
+ if tg.RestartPolicy == nil { + tg.RestartPolicy = NewRestartPolicy(job.Type) + } + + // Set a default ephemeral disk object if the user has not requested for one + if tg.EphemeralDisk == nil { + tg.EphemeralDisk = DefaultEphemeralDisk() + } + + for _, task := range tg.Tasks { + task.Canonicalize(job, tg) + } + + // Add up the disk resources to EphemeralDisk. This is done so that users + // are not required to move their disk attribute from resources to + // EphemeralDisk section of the job spec in Nomad 0.5 + // COMPAT 0.4.1 -> 0.5 + // Remove in 0.6 + var diskMB int + for _, task := range tg.Tasks { + diskMB += task.Resources.DiskMB + } + if diskMB > 0 { + tg.EphemeralDisk.SizeMB = diskMB + } +} + +// Validate is used to sanity check a task group +func (tg *TaskGroup) Validate(j *Job) error { + var mErr multierror.Error + if tg.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task group name")) + } + if tg.Count < 0 { + mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative")) + } + if len(tg.Tasks) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group")) + } + for idx, constr := range tg.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + if tg.RestartPolicy != nil { + if err := tg.RestartPolicy.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) + } + + if tg.EphemeralDisk != nil { + if err := tg.EphemeralDisk.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) + } + + // Validate the update strategy + if u := tg.Update; u != nil { + switch j.Type { + case JobTypeService, JobTypeSystem: + default: + // COMPAT: Enable in 0.7.0 + //mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type)) + } + if err := u.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + // Check for duplicate tasks, that there is only leader task if any, + // and no duplicated static ports + tasks := make(map[string]int) + staticPorts := make(map[int]string) + leaderTasks := 0 + for idx, task := range tg.Tasks { + if task.Name == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1)) + } else if existing, ok := tasks[task.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) + } else { + tasks[task.Name] = idx + } + + if task.Leader { + leaderTasks++ + } + + if task.Resources == nil { + continue + } + + for _, net := range task.Resources.Networks { + for _, port := range net.ReservedPorts { + if other, ok := staticPorts[port.Value]; ok { + err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other) + mErr.Errors = append(mErr.Errors, err) + } else { + staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label) + } + } + } + } + + if leaderTasks > 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader")) + } + + // Validate the tasks + for _, task := range tg.Tasks { + if err := task.Validate(tg.EphemeralDisk); err != nil { + outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) + mErr.Errors = 
append(mErr.Errors, outer) + } + } + return mErr.ErrorOrNil() +} + +// Warnings returns a list of warnings that may be from dubious settings or +// deprecation warnings. +func (tg *TaskGroup) Warnings(j *Job) error { + var mErr multierror.Error + + // Validate the update strategy + if u := tg.Update; u != nil { + // Check the counts are appropriate + if u.MaxParallel > tg.Count { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+ + "A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count)) + } + } + + return mErr.ErrorOrNil() +} + +// LookupTask finds a task by name +func (tg *TaskGroup) LookupTask(name string) *Task { + for _, t := range tg.Tasks { + if t.Name == name { + return t + } + } + return nil +} + +func (tg *TaskGroup) GoString() string { + return fmt.Sprintf("*%#v", *tg) +} + +// CheckRestart describes if and when a task should be restarted based on +// failing health checks. +type CheckRestart struct { + Limit int // Restart task after this many unhealthy intervals + Grace time.Duration // Grace time to give tasks after starting to get healthy + IgnoreWarnings bool // If true treat checks in `warning` as passing +} + +func (c *CheckRestart) Copy() *CheckRestart { + if c == nil { + return nil + } + + nc := new(CheckRestart) + *nc = *c + return nc +} + +func (c *CheckRestart) Validate() error { + if c == nil { + return nil + } + + var mErr multierror.Error + if c.Limit < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit)) + } + + if c.Grace < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace)) + } + + return mErr.ErrorOrNil() +} + +const ( + ServiceCheckHTTP = "http" + ServiceCheckTCP = "tcp" + ServiceCheckScript = "script" + + // minCheckInterval is the minimum check interval permitted. Consul + // currently has its MinInterval set to 1s. Mirror that here for + // consistency. + minCheckInterval = 1 * time.Second + + // minCheckTimeout is the minimum check timeout permitted for Consul + // script TTL checks. 
+	minCheckTimeout = 1 * time.Second
+)
+
+// The ServiceCheck data model represents the Consul health check that
+// Nomad registers for a Task
+type ServiceCheck struct {
+	Name          string              // Name of the check, defaults to id
+	Type          string              // Type of the check - tcp, http, docker and script
+	Command       string              // Command is the command to run for script checks
+	Args          []string            // Args is a list of arguments for script checks
+	Path          string              // path of the health check url for http type check
+	Protocol      string              // Protocol to use if check is http, defaults to http
+	PortLabel     string              // The port to use for tcp/http checks
+	Interval      time.Duration       // Interval of the check
+	Timeout       time.Duration       // Timeout of the response from the check before Consul fails the check
+	InitialStatus string              // Initial status of the check
+	TLSSkipVerify bool                // Skip TLS verification when Protocol=https
+	Method        string              // HTTP Method to use (GET by default)
+	Header        map[string][]string // HTTP Headers for Consul to set when making HTTP checks
+	CheckRestart  *CheckRestart       // If and when a task should be restarted based on checks
+}
+
+func (sc *ServiceCheck) Copy() *ServiceCheck {
+	if sc == nil {
+		return nil
+	}
+	nsc := new(ServiceCheck)
+	*nsc = *sc
+	nsc.Args = helper.CopySliceString(sc.Args)
+	nsc.Header = helper.CopyMapStringSliceString(sc.Header)
+	nsc.CheckRestart = sc.CheckRestart.Copy()
+	return nsc
+}
+
+func (sc *ServiceCheck) Canonicalize(serviceName string) {
+	// Ensure empty maps/slices are treated as null to avoid scheduling
+	// issues when using DeepEquals.
+	if len(sc.Args) == 0 {
+		sc.Args = nil
+	}
+
+	if len(sc.Header) == 0 {
+		sc.Header = nil
+	} else {
+		for k, v := range sc.Header {
+			if len(v) == 0 {
+				sc.Header[k] = nil
+			}
+		}
+	}
+
+	if sc.Name == "" {
+		sc.Name = fmt.Sprintf("service: %q check", serviceName)
+	}
+}
+
+// validate a Service's ServiceCheck
+func (sc *ServiceCheck) validate() error {
+	switch strings.ToLower(sc.Type) {
+	case ServiceCheckTCP:
+	case ServiceCheckHTTP:
+		if sc.Path == "" {
+			return fmt.Errorf("http type must have a valid http path")
+		}
+
+	case ServiceCheckScript:
+		if sc.Command == "" {
+			return fmt.Errorf("script type must have a valid script path")
+		}
+	default:
+		return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type)
+	}
+
+	if sc.Interval == 0 {
+		return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval)
+	} else if sc.Interval < minCheckInterval {
+		return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval)
+	}
+
+	if sc.Timeout == 0 {
+		return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
+	} else if sc.Timeout < minCheckTimeout {
+		return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
+	}
+
+	switch sc.InitialStatus {
+	case "":
+		// case api.HealthUnknown: TODO: Add when Consul releases 0.7.1
+	case api.HealthPassing:
+	case api.HealthWarning:
+	case api.HealthCritical:
+	default:
+		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
+
+	}
+
+	return sc.CheckRestart.Validate()
+}
+
+// RequiresPort returns whether the service check requires the task to have a port.
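+// As an illustrative sketch (the literals below are assumptions, not values
+// used elsewhere in this patch), http and tcp checks dial a port while
+// script checks run inside the task and do not:
+//
+//	(&ServiceCheck{Type: ServiceCheckHTTP}).RequiresPort()   // true
+//	(&ServiceCheck{Type: ServiceCheckScript}).RequiresPort() // false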
+func (sc *ServiceCheck) RequiresPort() bool {
+	switch sc.Type {
+	case ServiceCheckHTTP, ServiceCheckTCP:
+		return true
+	default:
+		return false
+	}
+}
+
+// TriggersRestarts returns true if this check should be watched and trigger a restart
+// on failure.
+func (sc *ServiceCheck) TriggersRestarts() bool {
+	return sc.CheckRestart != nil && sc.CheckRestart.Limit > 0
+}
+
+// Hash all ServiceCheck fields and the check's corresponding service ID to
+// create an identifier. The identifier is not guaranteed to be unique because,
+// if the PortLabel is blank, the Service's PortLabel will be used after Hash
+// is called.
+func (sc *ServiceCheck) Hash(serviceID string) string {
+	h := sha1.New()
+	io.WriteString(h, serviceID)
+	io.WriteString(h, sc.Name)
+	io.WriteString(h, sc.Type)
+	io.WriteString(h, sc.Command)
+	io.WriteString(h, strings.Join(sc.Args, ""))
+	io.WriteString(h, sc.Path)
+	io.WriteString(h, sc.Protocol)
+	io.WriteString(h, sc.PortLabel)
+	io.WriteString(h, sc.Interval.String())
+	io.WriteString(h, sc.Timeout.String())
+	io.WriteString(h, sc.Method)
+	// Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6
+	if sc.TLSSkipVerify {
+		io.WriteString(h, "true")
+	}
+
+	// Since map iteration order isn't stable we need to write k/v pairs to
+	// a slice and sort it before hashing.
+	if len(sc.Header) > 0 {
+		headers := make([]string, 0, len(sc.Header))
+		for k, v := range sc.Header {
+			headers = append(headers, k+strings.Join(v, ""))
+		}
+		sort.Strings(headers)
+		io.WriteString(h, strings.Join(headers, ""))
+	}
+
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+const (
+	AddressModeAuto   = "auto"
+	AddressModeHost   = "host"
+	AddressModeDriver = "driver"
+)
+
+// Service represents a Consul service definition in Nomad
+type Service struct {
+	// Name of the service registered with Consul. Consul defaults the
+	// Name to ServiceID if not specified. The Name if specified is used
+	// as one of the seed values when generating a Consul ServiceID.
+	Name string
+
+	// PortLabel is either the numeric port number or the `host:port`.
+	// To specify the port number using the host's Consul Advertise
+	// address, specify an empty host in the PortLabel (e.g. `:port`).
+	PortLabel string
+
+	// AddressMode specifies whether or not to use the host ip:port for
+	// this service.
+	AddressMode string
+
+	Tags   []string        // List of tags for the service
+	Checks []*ServiceCheck // List of checks associated with the service
+}
+
+func (s *Service) Copy() *Service {
+	if s == nil {
+		return nil
+	}
+	ns := new(Service)
+	*ns = *s
+	ns.Tags = helper.CopySliceString(ns.Tags)
+
+	if s.Checks != nil {
+		checks := make([]*ServiceCheck, len(ns.Checks))
+		for i, c := range ns.Checks {
+			checks[i] = c.Copy()
+		}
+		ns.Checks = checks
+	}
+
+	return ns
+}
+
+// Canonicalize interpolates values of Job, Task Group and Task in the Service
+// Name. This also generates check names, service id and check ids.
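+// A minimal sketch of the interpolation, assuming a hypothetical service
+// name (not taken from this patch):
+//
+//	s := &Service{Name: "${JOB}-web"}
+//	s.Canonicalize("api", "frontend", "server")
+//	// s.Name is now "api-web"; "${BASE}" would expand to
+//	// "api-frontend-server".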
+func (s *Service) Canonicalize(job string, taskGroup string, task string) {
+	// Ensure empty lists are treated as null to avoid scheduler issues when
+	// using DeepEquals
+	if len(s.Tags) == 0 {
+		s.Tags = nil
+	}
+	if len(s.Checks) == 0 {
+		s.Checks = nil
+	}
+
+	s.Name = args.ReplaceEnv(s.Name, map[string]string{
+		"JOB":       job,
+		"TASKGROUP": taskGroup,
+		"TASK":      task,
+		"BASE":      fmt.Sprintf("%s-%s-%s", job, taskGroup, task),
+	},
+	)
+
+	for _, check := range s.Checks {
+		check.Canonicalize(s.Name)
+	}
+}
+
+// Validate checks if the Service definition is valid
+func (s *Service) Validate() error {
+	var mErr multierror.Error
+
+	// Ensure the service name is valid per the below RFCs but make an exception
+	// for our interpolation syntax
+	// RFC-952 §1 (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
+	// (https://tools.ietf.org/html/rfc1123), and RFC-2782
+	// (https://tools.ietf.org/html/rfc2782).
+	re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9\$][a-zA-Z0-9\-\$\{\}\_\.]*[a-z0-9\}])$`)
+	if !re.MatchString(s.Name) {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name))
+	}
+
+	switch s.AddressMode {
+	case "", AddressModeAuto, AddressModeHost, AddressModeDriver:
+		// OK
+	default:
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode))
+	}
+
+	for _, c := range s.Checks {
+		if s.PortLabel == "" && c.RequiresPort() {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name))
+			continue
+		}
+
+		if err := c.validate(); err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err))
+		}
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// ValidateName checks if the service's Name is valid and should be called after
+// the name has been interpolated
+func (s *Service) ValidateName(name string) error {
+	// Ensure the service name is valid per RFC-952 §1
+	// (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
+	// (https://tools.ietf.org/html/rfc1123), and RFC-2782
+	// (https://tools.ietf.org/html/rfc2782).
+	re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`)
+	if !re.MatchString(name) {
+		return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name)
+	}
+	return nil
+}
+
+// Hash calculates the hash of the service based on its content
+func (s *Service) Hash() string {
+	h := sha1.New()
+	io.WriteString(h, s.Name)
+	io.WriteString(h, strings.Join(s.Tags, ""))
+	io.WriteString(h, s.PortLabel)
+	io.WriteString(h, s.AddressMode)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+const (
+	// DefaultKillTimeout is the default timeout between signaling a task it
+	// will be killed and killing it.
+	DefaultKillTimeout = 5 * time.Second
+)
+
+// LogConfig provides configuration for log rotation
+type LogConfig struct {
+	MaxFiles      int
+	MaxFileSizeMB int
+}
+
+// DefaultLogConfig returns the default LogConfig values.
+func DefaultLogConfig() *LogConfig {
+	return &LogConfig{
+		MaxFiles:      10,
+		MaxFileSizeMB: 10,
+	}
+}
+
+// Validate returns an error if the log config values specified are less than
+// the minimum allowed.
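+// For example (illustrative only):
+//
+//	(&LogConfig{MaxFiles: 0, MaxFileSizeMB: 10}).Validate() // error: minimum number of files is 1
+//	DefaultLogConfig().Validate()                           // nil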
+func (l *LogConfig) Validate() error {
+	var mErr multierror.Error
+	if l.MaxFiles < 1 {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
+	}
+	if l.MaxFileSizeMB < 1 {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
+	}
+	return mErr.ErrorOrNil()
+}
+
+// Task is a single process that is typically executed as part of a task group.
+type Task struct {
+	// Name of the task
+	Name string
+
+	// Driver is used to control which driver is used
+	Driver string
+
+	// User is used to determine which user will run the task. It defaults to
+	// the same user the Nomad client is being run as.
+	User string
+
+	// Config is provided to the driver to initialize
+	Config map[string]interface{}
+
+	// Map of environment variables to be used by the driver
+	Env map[string]string
+
+	// List of service definitions exposed by the Task
+	Services []*Service
+
+	// Vault is used to define the set of Vault policies that this task should
+	// have access to.
+	Vault *Vault
+
+	// Templates are the set of templates to be rendered for the task.
+	Templates []*Template
+
+	// Constraints can be specified at a task level and apply only to
+	// the particular task.
+	Constraints []*Constraint
+
+	// Resources is the resources needed by this task
+	Resources *Resources
+
+	// DispatchPayload configures how the task retrieves its input from a dispatch
+	DispatchPayload *DispatchPayloadConfig
+
+	// Meta is used to associate arbitrary metadata with this
+	// task. This is opaque to Nomad.
+	Meta map[string]string
+
+	// KillTimeout is the time between signaling a task that it will be
+	// killed and killing it.
+	KillTimeout time.Duration
+
+	// LogConfig provides configuration for log rotation
+	LogConfig *LogConfig
+
+	// Artifacts is a list of artifacts to download and extract before running
+	// the task.
+	Artifacts []*TaskArtifact
+
+	// Leader marks the task as the leader within the group. When the leader
+	// task exits, other tasks will be gracefully terminated.
+	Leader bool
+
+	// ShutdownDelay is the duration of the delay between deregistering a
+	// task from Consul and sending it a signal to shutdown. See #2441
+	ShutdownDelay time.Duration
+}
+
+func (t *Task) Copy() *Task {
+	if t == nil {
+		return nil
+	}
+	nt := new(Task)
+	*nt = *t
+	nt.Env = helper.CopyMapStringString(nt.Env)
+
+	if t.Services != nil {
+		services := make([]*Service, len(nt.Services))
+		for i, s := range nt.Services {
+			services[i] = s.Copy()
+		}
+		nt.Services = services
+	}
+
+	nt.Constraints = CopySliceConstraints(nt.Constraints)
+
+	nt.Vault = nt.Vault.Copy()
+	nt.Resources = nt.Resources.Copy()
+	nt.Meta = helper.CopyMapStringString(nt.Meta)
+	nt.DispatchPayload = nt.DispatchPayload.Copy()
+
+	if t.Artifacts != nil {
+		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
+		for _, a := range nt.Artifacts {
+			artifacts = append(artifacts, a.Copy())
+		}
+		nt.Artifacts = artifacts
+	}
+
+	if i, err := copystructure.Copy(nt.Config); err != nil {
+		panic(err.Error())
+	} else {
+		nt.Config = i.(map[string]interface{})
+	}
+
+	if t.Templates != nil {
+		templates := make([]*Template, len(t.Templates))
+		for i, tmpl := range nt.Templates {
+			templates[i] = tmpl.Copy()
+		}
+		nt.Templates = templates
+	}
+
+	return nt
+}
+
+// Canonicalize canonicalizes fields in the task.
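+// A sketch of the effect, assuming job and tg are already in scope (the
+// task values are hypothetical):
+//
+//	t := &Task{Name: "server", Env: map[string]string{}}
+//	t.Canonicalize(job, tg)
+//	// t.Env == nil, t.KillTimeout == DefaultKillTimeout,
+//	// t.Resources == DefaultResources()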
+func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
+	// Ensure that an empty and nil map are treated the same to avoid scheduling
+	// problems since we use reflect DeepEquals.
+	if len(t.Meta) == 0 {
+		t.Meta = nil
+	}
+	if len(t.Config) == 0 {
+		t.Config = nil
+	}
+	if len(t.Env) == 0 {
+		t.Env = nil
+	}
+
+	for _, service := range t.Services {
+		service.Canonicalize(job.Name, tg.Name, t.Name)
+	}
+
+	// If Resources are nil initialize them to defaults, otherwise canonicalize
+	if t.Resources == nil {
+		t.Resources = DefaultResources()
+	} else {
+		t.Resources.Canonicalize()
+	}
+
+	// Set the default timeout if it is not specified.
+	if t.KillTimeout == 0 {
+		t.KillTimeout = DefaultKillTimeout
+	}
+
+	if t.Vault != nil {
+		t.Vault.Canonicalize()
+	}
+
+	for _, template := range t.Templates {
+		template.Canonicalize()
+	}
+}
+
+func (t *Task) GoString() string {
+	return fmt.Sprintf("*%#v", *t)
+}
+
+// Validate is used to sanity check a task
+func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
+	var mErr multierror.Error
+	if t.Name == "" {
+		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
+	}
+	if strings.ContainsAny(t.Name, `/\`) {
+		// We enforce this so that when creating the directory on disk it will
+		// not have any slashes.
+		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
+	}
+	if t.Driver == "" {
+		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
+	}
+	if t.KillTimeout < 0 {
+		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
+	}
+	if t.ShutdownDelay < 0 {
+		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
+	}
+
+	// Validate the resources.
+	if t.Resources == nil {
+		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
+	} else {
+		if err := t.Resources.MeetsMinResources(); err != nil {
+			mErr.Errors = append(mErr.Errors, err)
+		}
+
+		// Ensure the task isn't asking for disk resources
+		if t.Resources.DiskMB > 0 {
+			mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
+		}
+	}
+
+	// Validate the log config
+	if t.LogConfig == nil {
+		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
+	} else if err := t.LogConfig.Validate(); err != nil {
+		mErr.Errors = append(mErr.Errors, err)
+	}
+
+	for idx, constr := range t.Constraints {
+		if err := constr.Validate(); err != nil {
+			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
+			mErr.Errors = append(mErr.Errors, outer)
+		}
+
+		switch constr.Operand {
+		case ConstraintDistinctHosts, ConstraintDistinctProperty:
+			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
+			mErr.Errors = append(mErr.Errors, outer)
+		}
+	}
+
+	// Validate Services
+	if err := validateServices(t); err != nil {
+		mErr.Errors = append(mErr.Errors, err)
+	}
+
+	if t.LogConfig != nil && ephemeralDisk != nil {
+		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
+		if ephemeralDisk.SizeMB <= logUsage {
+			mErr.Errors = append(mErr.Errors,
+				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
+					logUsage, ephemeralDisk.SizeMB))
+		}
+	}
+
+	for idx, artifact := range t.Artifacts {
+		if err := artifact.Validate(); err != nil {
+			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
+			mErr.Errors = append(mErr.Errors, outer)
+		}
+	}
+
+	if t.Vault != nil {
+		if err := t.Vault.Validate(); err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
+		}
+	}
+
+	destinations := make(map[string]int, len(t.Templates))
+	for idx, tmpl := range t.Templates {
+		if err := tmpl.Validate(); err != nil {
+			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
+			mErr.Errors = append(mErr.Errors, outer)
+		}
+
+		if other, ok := destinations[tmpl.DestPath]; ok {
+			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
+			mErr.Errors = append(mErr.Errors, outer)
+		} else {
+			destinations[tmpl.DestPath] = idx + 1
+		}
+	}
+
+	// Validate the dispatch payload block if present
+	if t.DispatchPayload != nil {
+		if err := t.DispatchPayload.Validate(); err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
+		}
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// validateServices takes a task and validates the services within it are valid
+// and reference ports that exist.
+func validateServices(t *Task) error {
+	var mErr multierror.Error
+
+	// Ensure that services don't ask for non-existent ports and their names are
+	// unique.
+	servicePorts := make(map[string][]string)
+	knownServices := make(map[string]struct{})
+	for i, service := range t.Services {
+		if err := service.Validate(); err != nil {
+			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
+			mErr.Errors = append(mErr.Errors, outer)
+		}
+
+		// Ensure that services with the same name are not being registered for
+		// the same port
+		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
+		}
+		knownServices[service.Name+service.PortLabel] = struct{}{}
+
+		if service.PortLabel != "" {
+			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
+		}
+
+		// Ensure that check names are unique.
+		knownChecks := make(map[string]struct{})
+		for _, check := range service.Checks {
+			if _, ok := knownChecks[check.Name]; ok {
+				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
+			}
+			knownChecks[check.Name] = struct{}{}
+		}
+	}
+
+	// Get the set of port labels.
+	portLabels := make(map[string]struct{})
+	if t.Resources != nil {
+		for _, network := range t.Resources.Networks {
+			ports := network.PortLabels()
+			for portLabel := range ports {
+				portLabels[portLabel] = struct{}{}
+			}
+		}
+	}
+
+	// Ensure all ports referenced in services exist.
+	for servicePort, services := range servicePorts {
+		_, ok := portLabels[servicePort]
+		if !ok {
+			joined := strings.Join(services, ", ")
+			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
+			mErr.Errors = append(mErr.Errors, err)
+		}
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+const (
+	// TemplateChangeModeNoop marks that no action should be taken if the
+	// template is re-rendered
+	TemplateChangeModeNoop = "noop"
+
+	// TemplateChangeModeSignal marks that the task should be signaled if the
+	// template is re-rendered
+	TemplateChangeModeSignal = "signal"
+
+	// TemplateChangeModeRestart marks that the task should be restarted if the
+	// template is re-rendered
+	TemplateChangeModeRestart = "restart"
+)
+
+var (
+	// TemplateChangeModeInvalidError is the error for when an invalid change
+	// mode is given
+	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
+)
+
+// Template represents a template configuration to be rendered for a given task
+type Template struct {
+	// SourcePath is the path to the template to be rendered
+	SourcePath string
+
+	// DestPath is the path to where the template should be rendered
+	DestPath string
+
+	// EmbeddedTmpl stores the raw template. This is useful for smaller templates
+	// where they are embedded in the job file rather than sent as an artifact
+	EmbeddedTmpl string
+
+	// ChangeMode indicates what should be done if the template is re-rendered
+	ChangeMode string
+
+	// ChangeSignal is the signal that should be sent if the change mode
+	// requires it.
+	ChangeSignal string
+
+	// Splay is used to avoid coordinated restarts of processes by applying a
+	// random wait between 0 and the given splay value before signalling the
+	// application of a change
+	Splay time.Duration
+
+	// Perms is the permission the file should be written out with.
+	Perms string
+
+	// LeftDelim and RightDelim are optional configurations to control what
+	// delimiter is utilized when parsing the template.
+	LeftDelim  string
+	RightDelim string
+
+	// Envvars enables exposing the template as environment variables
+	// instead of as a file. The template must be of the form:
+	//
+	//	VAR_NAME_1={{ key service/my-key }}
+	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
+	//
+	// Lines will be split on the initial "=" with the first part being the
+	// key name and the second part the value.
+	// Empty lines and lines starting with # will be ignored, but to avoid
+	// escaping issues #s within lines will not be treated as comments.
+	Envvars bool
+
+	// VaultGrace is the grace duration between lease renewal and reacquiring a
+	// secret. If the lease of a secret is less than the grace, a new secret is
+	// acquired.
+	VaultGrace time.Duration
+}
+
+// DefaultTemplate returns a default template.
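+// The defaults mirror the function body below:
+//
+//	tmpl := DefaultTemplate()
+//	// tmpl.ChangeMode == TemplateChangeModeRestart
+//	// tmpl.Splay == 5*time.Second, tmpl.Perms == "0644"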
+func DefaultTemplate() *Template {
+	return &Template{
+		ChangeMode: TemplateChangeModeRestart,
+		Splay:      5 * time.Second,
+		Perms:      "0644",
+	}
+}
+
+func (t *Template) Copy() *Template {
+	if t == nil {
+		return nil
+	}
+	copy := new(Template)
+	*copy = *t
+	return copy
+}
+
+func (t *Template) Canonicalize() {
+	if t.ChangeSignal != "" {
+		t.ChangeSignal = strings.ToUpper(t.ChangeSignal)
+	}
+}
+
+func (t *Template) Validate() error {
+	var mErr multierror.Error
+
+	// Verify we have something to render
+	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
+		multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
+	}
+
+	// Verify we can render somewhere
+	if t.DestPath == "" {
+		multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
+	}
+
+	// Verify the destination doesn't escape
+	escaped, err := PathEscapesAllocDir("task", t.DestPath)
+	if err != nil {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
+	} else if escaped {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
+	}
+
+	// Verify a proper change mode
+	switch t.ChangeMode {
+	case TemplateChangeModeNoop, TemplateChangeModeRestart:
+	case TemplateChangeModeSignal:
+		if t.ChangeSignal == "" {
+			multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
+		}
+		if t.Envvars {
+			multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
+		}
+	default:
+		multierror.Append(&mErr, TemplateChangeModeInvalidError)
+	}
+
+	// Verify the splay is positive
+	if t.Splay < 0 {
+		multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
+	}
+
+	// Verify the permissions
+	if t.Perms != "" {
+		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
+			multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
+		}
+	}
+
+	if t.VaultGrace.Nanoseconds() < 0 {
+		multierror.Append(&mErr, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace))
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// Set of possible states for a task.
+const (
+	TaskStatePending = "pending" // The task is waiting to be run.
+	TaskStateRunning = "running" // The task is currently running.
+	TaskStateDead    = "dead"    // Terminal state of task.
+)
+
+// TaskState tracks the current state of a task and events that caused state
+// transitions.
+type TaskState struct {
+	// The current state of the task.
+	State string
+
+	// Failed marks a task as having failed
+	Failed bool
+
+	// Restarts is the number of times the task has restarted
+	Restarts uint64
+
+	// LastRestart is the time the task last restarted. It is updated each time the
+	// task restarts
+	LastRestart time.Time
+
+	// StartedAt is the time the task is started. It is updated each time the
+	// task starts
+	StartedAt time.Time
+
+	// FinishedAt is the time at which the task transitioned to dead and will
+	// not be started again.
+	FinishedAt time.Time
+
+	// Series of task events that transition the state of the task.
+	Events []*TaskEvent
+}
+
+func (ts *TaskState) Copy() *TaskState {
+	if ts == nil {
+		return nil
+	}
+	copy := new(TaskState)
+	*copy = *ts
+
+	if ts.Events != nil {
+		copy.Events = make([]*TaskEvent, len(ts.Events))
+		for i, e := range ts.Events {
+			copy.Events[i] = e.Copy()
+		}
+	}
+	return copy
+}
+
+// Successful returns whether a task finished successfully.
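+// A sketch of the one terminal case it accepts (event values assumed):
+//
+//	ts := &TaskState{
+//		State:  TaskStateDead,
+//		Events: []*TaskEvent{{Type: TaskTerminated, ExitCode: 0}},
+//	}
+//	ts.Successful() // true; a non-zero ExitCode or a non-dead State is false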
+func (ts *TaskState) Successful() bool {
+	l := len(ts.Events)
+	if ts.State != TaskStateDead || l == 0 {
+		return false
+	}
+
+	e := ts.Events[l-1]
+	if e.Type != TaskTerminated {
+		return false
+	}
+
+	return e.ExitCode == 0
+}
+
+const (
+	// TaskSetupFailure indicates that the task could not be started due to a
+	// setup failure.
+	TaskSetupFailure = "Setup Failure"
+
+	// TaskDriverFailure indicates that the task could not be started due to a
+	// failure in the driver.
+	TaskDriverFailure = "Driver Failure"
+
+	// TaskReceived signals that the task has been pulled by the client at the
+	// given timestamp.
+	TaskReceived = "Received"
+
+	// TaskFailedValidation indicates the task was invalid and as such was not
+	// run.
+	TaskFailedValidation = "Failed Validation"
+
+	// TaskStarted signals that the task was started and its timestamp can be
+	// used to determine the running length of the task.
+	TaskStarted = "Started"
+
+	// TaskTerminated indicates that the task was started and exited.
+	TaskTerminated = "Terminated"
+
+	// TaskKilling indicates a kill signal has been sent to the task.
+	TaskKilling = "Killing"
+
+	// TaskKilled indicates a user has killed the task.
+	TaskKilled = "Killed"
+
+	// TaskRestarting indicates that task terminated and is being restarted.
+	TaskRestarting = "Restarting"
+
+	// TaskNotRestarting indicates that the task has failed and is not being
+	// restarted because it has exceeded its restart policy.
+	TaskNotRestarting = "Not Restarting"
+
+	// TaskRestartSignal indicates that the task has been signalled to be
+	// restarted
+	TaskRestartSignal = "Restart Signaled"
+
+	// TaskSignaling indicates that the task is being signalled.
+	TaskSignaling = "Signaling"
+
+	// TaskDownloadingArtifacts means the task is downloading the artifacts
+	// specified in the task.
+	TaskDownloadingArtifacts = "Downloading Artifacts"
+
+	// TaskArtifactDownloadFailed indicates that downloading the artifacts
+	// failed.
+	TaskArtifactDownloadFailed = "Failed Artifact Download"
+
+	// TaskBuildingTaskDir indicates that the task directory/chroot is being
+	// built.
+	TaskBuildingTaskDir = "Building Task Directory"
+
+	// TaskSetup indicates the task runner is setting up the task environment
+	TaskSetup = "Task Setup"
+
+	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
+	// exceeded the requested disk resources.
+	TaskDiskExceeded = "Disk Resources Exceeded"
+
+	// TaskSiblingFailed indicates that a sibling task in the task group has
+	// failed.
+	TaskSiblingFailed = "Sibling Task Failed"
+
+	// TaskDriverMessage is an informational event message emitted by
+	// drivers such as when they're performing a long running action like
+	// downloading an image.
+	TaskDriverMessage = "Driver"
+
+	// TaskLeaderDead indicates that the leader task within the group has finished.
+	TaskLeaderDead = "Leader Task Dead"
+
+	// TaskGenericMessage is used by various subsystems to emit a message.
+	TaskGenericMessage = "Generic"
+)
+
+// TaskEvent is an event that affects the state of a task and contains meta-data
+// appropriate to the event's type.
+type TaskEvent struct {
+	Type string
+	Time int64 // Unix Nanosecond timestamp
+
+	// FailsTask marks whether this event fails the task
+	FailsTask bool
+
+	// Restart fields.
+	RestartReason string
+
+	// Setup Failure fields.
+	SetupError string
+
+	// Driver Failure fields.
+	DriverError string // A driver error occurred while starting the task.
+
+	// Task Terminated Fields.
+	ExitCode int // The exit code of the task.
+	Signal  int    // The signal that terminated the task.
+	Message string // A possible message explaining the termination of the task.
+
+	// Killing fields
+	KillTimeout time.Duration
+
+	// Task Killed Fields.
+	KillError string // Error killing the task.
+
+	// KillReason is the reason the task was killed
+	KillReason string
+
+	// TaskRestarting fields.
+	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.
+
+	// Artifact Download fields
+	DownloadError string // Error downloading artifacts
+
+	// Validation fields
+	ValidationError string // Validation error
+
+	// The maximum allowed task disk size.
+	DiskLimit int64
+
+	// Name of the sibling task that caused termination of the task that
+	// the TaskEvent refers to.
+	FailedSibling string
+
+	// VaultError is the error from token renewal
+	VaultError string
+
+	// TaskSignalReason indicates the reason the task is being signalled.
+	TaskSignalReason string
+
+	// TaskSignal is the signal that was sent to the task
+	TaskSignal string
+
+	// DriverMessage indicates a driver action being taken.
+	DriverMessage string
+
+	// GenericSource is the source of a message.
+	GenericSource string
+}
+
+func (te *TaskEvent) GoString() string {
+	return fmt.Sprintf("%v - %v", te.Time, te.Type)
+}
+
+// SetMessage sets the message of TaskEvent
+func (te *TaskEvent) SetMessage(msg string) *TaskEvent {
+	te.Message = msg
+	return te
+}
+
+func (te *TaskEvent) Copy() *TaskEvent {
+	if te == nil {
+		return nil
+	}
+	copy := new(TaskEvent)
+	*copy = *te
+	return copy
+}
+
+func NewTaskEvent(event string) *TaskEvent {
+	return &TaskEvent{
+		Type: event,
+		Time: time.Now().UnixNano(),
+	}
+}
+
+// SetSetupError is used to store an error that occurred while setting up the
+// task
+func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
+	if err != nil {
+		e.SetupError = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetFailsTask() *TaskEvent {
+	e.FailsTask = true
+	return e
+}
+
+func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
+	if err != nil {
+		e.DriverError = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
+	e.ExitCode = c
+	return e
+}
+
+func (e *TaskEvent) SetSignal(s int) *TaskEvent {
+	e.Signal = s
+	return e
+}
+
+func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
+	if err != nil {
+		e.Message = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetKillError(err error) *TaskEvent {
+	if err != nil {
+		e.KillError = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
+	e.KillReason = r
+	return e
+}
+
+func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
+	e.StartDelay = int64(delay)
+	return e
+}
+
+func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
+	e.RestartReason = reason
+	return e
+}
+
+func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
+	e.TaskSignalReason = r
+	return e
+}
+
+func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
+	e.TaskSignal = s.String()
+	return e
+}
+
+func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
+	if err != nil {
+		e.DownloadError = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
+	if err != nil {
+		e.ValidationError = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
+	e.KillTimeout = timeout
+	return e
+}
+
+func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
+	e.DiskLimit = limit
+	return e
+}
+
+func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
+	e.FailedSibling = sibling
+	return e
+}
+
+func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
+	if err != nil {
+		e.VaultError = err.Error()
+	}
+	return e
+}
+
+func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
+	e.DriverMessage = m
+	return e
+}
+
+func (e *TaskEvent) SetGenericSource(s string) *TaskEvent {
+	e.GenericSource = s
+	return e
+}
+
+// TaskArtifact is an artifact to download before running the task.
+type TaskArtifact struct {
+	// GetterSource is the source to download an artifact using go-getter
+	GetterSource string
+
+	// GetterOptions are options to use when downloading the artifact using
+	// go-getter.
+	GetterOptions map[string]string
+
+	// GetterMode is the go-getter.ClientMode for fetching resources.
+	// Defaults to "any" but can be set to "file" or "dir".
+	GetterMode string
+
+	// RelativeDest is the download destination given relative to the task's
+	// directory.
+	RelativeDest string
+}
+
+func (ta *TaskArtifact) Copy() *TaskArtifact {
+	if ta == nil {
+		return nil
+	}
+	nta := new(TaskArtifact)
+	*nta = *ta
+	nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
+	return nta
+}
+
+func (ta *TaskArtifact) GoString() string {
+	return fmt.Sprintf("%+v", ta)
+}
+
+// PathEscapesAllocDir returns if the given path escapes the allocation
+// directory. The prefix allows adding a prefix if the path will be joined, for
+// example a "task/local" prefix may be provided if the path will be joined
+// against that prefix.
+func PathEscapesAllocDir(prefix, path string) (bool, error) {
+	// Verify the destination doesn't escape the tasks directory
+	alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
+	if err != nil {
+		return false, err
+	}
+	abs, err := filepath.Abs(filepath.Join(alloc, prefix, path))
+	if err != nil {
+		return false, err
+	}
+	rel, err := filepath.Rel(alloc, abs)
+	if err != nil {
+		return false, err
+	}
+
+	return strings.HasPrefix(rel, ".."), nil
+}
+
+func (ta *TaskArtifact) Validate() error {
+	// Verify the source
+	var mErr multierror.Error
+	if ta.GetterSource == "" {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
+	}
+
+	switch ta.GetterMode {
+	case "":
+		// Default to any
+		ta.GetterMode = GetterModeAny
+	case GetterModeAny, GetterModeFile, GetterModeDir:
+		// Ok
+	default:
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
+			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
+	}
+
+	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
+	if err != nil {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
+	} else if escaped {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
+	}
+
+	// Verify the checksum
+	if check, ok := ta.GetterOptions["checksum"]; ok {
+		check = strings.TrimSpace(check)
+		if check == "" {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty"))
+			return mErr.ErrorOrNil()
+		}
+
+		parts := strings.Split(check, ":")
+		if l := len(parts); l != 2 {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check))
+			return mErr.ErrorOrNil()
+		}
+
+		checksumVal := parts[1]
+		checksumBytes, err := hex.DecodeString(checksumVal)
+		if err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err))
+			return mErr.ErrorOrNil()
+		}
+
+		checksumType := parts[0]
+		expectedLength := 0
+		switch checksumType {
+		case "md5":
+			expectedLength = md5.Size
+		case "sha1":
+			expectedLength = sha1.Size
+		case "sha256":
+			expectedLength = sha256.Size
+		case "sha512":
+			expectedLength = sha512.Size
+		default:
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType))
+			return mErr.ErrorOrNil()
+		}
+
+		if len(checksumBytes) != expectedLength {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal))
+			return mErr.ErrorOrNil()
+		}
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+const (
+	ConstraintDistinctProperty = "distinct_property"
+	ConstraintDistinctHosts    = "distinct_hosts"
+	ConstraintRegex            = "regexp"
+	ConstraintVersion          = "version"
+	ConstraintSetContains      = "set_contains"
+)
+
+// Constraints are used to restrict placement options.
+type Constraint struct {
+	LTarget string // Left-hand target
+	RTarget string // Right-hand target
+	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
+	str     string // Memoized string
+}
+
+// Equal checks if two constraints are equal
+func (c *Constraint) Equal(o *Constraint) bool {
+	return c.LTarget == o.LTarget &&
+		c.RTarget == o.RTarget &&
+		c.Operand == o.Operand
+}
+
+func (c *Constraint) Copy() *Constraint {
+	if c == nil {
+		return nil
+	}
+	nc := new(Constraint)
+	*nc = *c
+	return nc
+}
+
+func (c *Constraint) String() string {
+	if c.str != "" {
+		return c.str
+	}
+	c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
+	return c.str
+}
+
+func (c *Constraint) Validate() error {
+	var mErr multierror.Error
+	if c.Operand == "" {
+		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
+	}
+
+	// requireLtarget specifies whether the constraint requires an LTarget to be
+	// provided.
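+	// For example (sketch; the targets are assumptions): a regexp constraint
+	// needs both targets,
+	//
+	//	(&Constraint{LTarget: "${attr.kernel.name}", Operand: ConstraintRegex, RTarget: "linux"}).Validate() // nil
+	//
+	// while distinct_hosts requires neither.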
+	requireLtarget := true
+
+	// Perform additional validation based on operand
+	switch c.Operand {
+	case ConstraintDistinctHosts:
+		requireLtarget = false
+	case ConstraintSetContains:
+		if c.RTarget == "" {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
+		}
+	case ConstraintRegex:
+		if _, err := regexp.Compile(c.RTarget); err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
+		}
+	case ConstraintVersion:
+		if _, err := version.NewConstraint(c.RTarget); err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
+		}
+	case ConstraintDistinctProperty:
+		// If a count is set, make sure it is convertible to a uint64
+		if c.RTarget != "" {
+			count, err := strconv.ParseUint(c.RTarget, 10, 64)
+			if err != nil {
+				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
+			} else if count < 1 {
+				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
+			}
+		}
+	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
+		if c.RTarget == "" {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
+		}
+	default:
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
+	}
+
+	// Ensure we have an LTarget for the constraints that need one
+	if requireLtarget && c.LTarget == "" {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// EphemeralDisk is an ephemeral disk object
+type EphemeralDisk struct {
+	// Sticky indicates whether the allocation is sticky to a node
+	Sticky bool
+
+	// SizeMB is the size of the local disk
+	SizeMB int
+
+	// Migrate determines if Nomad client should migrate the allocation dir for
+	// sticky allocations
+	Migrate bool
+}
+
+// DefaultEphemeralDisk returns an EphemeralDisk with default configurations
+func DefaultEphemeralDisk() *EphemeralDisk {
+	return &EphemeralDisk{
+		SizeMB: 300,
+	}
+}
+
+// Validate validates EphemeralDisk
+func (d *EphemeralDisk) Validate() error {
+	if d.SizeMB < 10 {
+		return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
+	}
+	return nil
+}
+
+// Copy copies the EphemeralDisk struct and returns a new one
+func (d *EphemeralDisk) Copy() *EphemeralDisk {
+	ld := new(EphemeralDisk)
+	*ld = *d
+	return ld
+}
+
+const (
+	// VaultChangeModeNoop takes no action when a new token is retrieved.
+	VaultChangeModeNoop = "noop"
+
+	// VaultChangeModeSignal signals the task when a new token is retrieved.
+	VaultChangeModeSignal = "signal"
+
+	// VaultChangeModeRestart restarts the task when a new token is retrieved.
+	VaultChangeModeRestart = "restart"
+)
+
+// Vault stores the set of permissions a task needs access to from Vault.
+type Vault struct {
+	// Policies is the set of policies that the task needs access to
+	Policies []string
+
+	// Env marks whether the Vault Token should be exposed as an environment
+	// variable
+	Env bool
+
+	// ChangeMode is used to configure the task's behavior when the Vault
+	// token changes because the original token could not be renewed in time.
+	ChangeMode string
+
+	// ChangeSignal is the signal sent to the task when a new token is
+	// retrieved. This is only valid when using the signal change mode.
+	ChangeSignal string
+}
+
+func DefaultVaultBlock() *Vault {
+	return &Vault{
+		Env:        true,
+		ChangeMode: VaultChangeModeRestart,
+	}
+}
+
+// Copy returns a copy of this Vault block.
+func (v *Vault) Copy() *Vault {
+	if v == nil {
+		return nil
+	}
+
+	nv := new(Vault)
+	*nv = *v
+	return nv
+}
+
+func (v *Vault) Canonicalize() {
+	if v.ChangeSignal != "" {
+		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
+	}
+}
+
+// Validate returns if the Vault block is valid.
+func (v *Vault) Validate() error {
+	if v == nil {
+		return nil
+	}
+
+	var mErr multierror.Error
+	if len(v.Policies) == 0 {
+		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
+	}
+
+	for _, p := range v.Policies {
+		if p == "root" {
+			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
+		}
+	}
+
+	switch v.ChangeMode {
+	case VaultChangeModeSignal:
+		if v.ChangeSignal == "" {
+			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
+		}
+	case VaultChangeModeNoop, VaultChangeModeRestart:
+	default:
+		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+const (
+	// DeploymentStatuses are the various states a deployment can be in
+	DeploymentStatusRunning    = "running"
+	DeploymentStatusPaused     = "paused"
+	DeploymentStatusFailed     = "failed"
+	DeploymentStatusSuccessful = "successful"
+	DeploymentStatusCancelled  = "cancelled"
+
+	// DeploymentStatusDescriptions are the various descriptions of the states a
+	// deployment can be in.
+	DeploymentStatusDescriptionRunning               = "Deployment is running"
+	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
+	DeploymentStatusDescriptionPaused                = "Deployment is paused"
+	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
+	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
+	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
+	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
+	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
+)
+
+// DeploymentStatusDescriptionRollback is used to get the status description of
+// a deployment when rolling back to an older job.
+func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
+	return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion)
+}
+
+// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
+// a deployment when there is no target to rollback to but auto revert is desired.
+func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
+	return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription)
+}
+
+// Deployment is the object that represents a job deployment which is used to
+// transition a job between versions.
+type Deployment struct {
+	// ID is a generated UUID for the deployment
+	ID string
+
+	// Namespace is the namespace the deployment is created in
+	Namespace string
+
+	// JobID is the job the deployment is created for
+	JobID string
+
+	// JobVersion is the version of the job the deployment is tracking
+	JobVersion uint64
+
+	// JobModifyIndex is the modify index of the job the deployment is tracking
+	JobModifyIndex uint64
+
+	// JobCreateIndex is the create index of the job which the deployment is
+	// tracking. It is needed so that if the job gets stopped and re-run we can
+	// present the correct list of deployments for the job and not old ones.
+	JobCreateIndex uint64
+
+	// TaskGroups is the set of task groups affected by the deployment and their
+	// current deployment status.
+	TaskGroups map[string]*DeploymentState
+
+	// The status of the deployment
+	Status string
+
+	// StatusDescription allows a human readable description of the deployment
+	// status.
+	StatusDescription string
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// NewDeployment creates a new deployment given the job.
+func NewDeployment(job *Job) *Deployment {
+	return &Deployment{
+		ID:                GenerateUUID(),
+		Namespace:         job.Namespace,
+		JobID:             job.ID,
+		JobVersion:        job.Version,
+		JobModifyIndex:    job.ModifyIndex,
+		JobCreateIndex:    job.CreateIndex,
+		Status:            DeploymentStatusRunning,
+		StatusDescription: DeploymentStatusDescriptionRunning,
+		TaskGroups:        make(map[string]*DeploymentState, len(job.TaskGroups)),
+	}
+}
+
+func (d *Deployment) Copy() *Deployment {
+	if d == nil {
+		return nil
+	}
+
+	c := &Deployment{}
+	*c = *d
+
+	c.TaskGroups = nil
+	if l := len(d.TaskGroups); d.TaskGroups != nil {
+		c.TaskGroups = make(map[string]*DeploymentState, l)
+		for tg, s := range d.TaskGroups {
+			c.TaskGroups[tg] = s.Copy()
+		}
+	}
+
+	return c
+}
+
+// Active returns whether the deployment is active or terminal.
+func (d *Deployment) Active() bool {
+	switch d.Status {
+	case DeploymentStatusRunning, DeploymentStatusPaused:
+		return true
+	default:
+		return false
+	}
+}
+
+// GetID is a helper for getting the ID when the object may be nil
+func (d *Deployment) GetID() string {
+	if d == nil {
+		return ""
+	}
+	return d.ID
+}
+
+// HasPlacedCanaries returns whether the deployment has placed canaries
+func (d *Deployment) HasPlacedCanaries() bool {
+	if d == nil || len(d.TaskGroups) == 0 {
+		return false
+	}
+	for _, group := range d.TaskGroups {
+		if len(group.PlacedCanaries) != 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// RequiresPromotion returns whether the deployment requires promotion to
+// continue
+func (d *Deployment) RequiresPromotion() bool {
+	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
+		return false
+	}
+	for _, group := range d.TaskGroups {
+		if group.DesiredCanaries > 0 && !group.Promoted {
+			return true
+		}
+	}
+	return false
+}
+
+func (d *Deployment) GoString() string {
+	base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
+	for group, state := range d.TaskGroups {
+		base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state)
+	}
+	return base
+}
+
+// DeploymentState tracks the state of a deployment for a given task group.
+type DeploymentState struct {
+	// AutoRevert marks whether the task group has indicated the job should be
+	// reverted on failure
+	AutoRevert bool
+
+	// Promoted marks whether the canaries have been promoted
+	Promoted bool
+
+	// PlacedCanaries is the set of placed canary allocations
+	PlacedCanaries []string
+
+	// DesiredCanaries is the number of canaries that should be created.
+	DesiredCanaries int
+
+	// DesiredTotal is the total number of allocations that should be created as
+	// part of the deployment.
+	DesiredTotal int
+
+	// PlacedAllocs is the number of allocations that have been placed
+	PlacedAllocs int
+
+	// HealthyAllocs is the number of allocations that have been marked healthy.
+	HealthyAllocs int
+
+	// UnhealthyAllocs are allocations that have been marked as unhealthy.
+	UnhealthyAllocs int
+}
+
+func (d *DeploymentState) GoString() string {
+	base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal)
+	base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries)
+	base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries)
+	base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted)
+	base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs)
+	base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs)
+	base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs)
+	base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert)
+	return base
+}
+
+func (d *DeploymentState) Copy() *DeploymentState {
+	c := &DeploymentState{}
+	*c = *d
+	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
+	return c
+}
+
+// DeploymentStatusUpdate is used to update the status of a given deployment
+type DeploymentStatusUpdate struct {
+	// DeploymentID is the ID of the deployment to update
+	DeploymentID string
+
+	// Status is the new status of the deployment.
+	Status string
+
+	// StatusDescription is the new status description of the deployment.
+	StatusDescription string
+}
+
+const (
+	AllocDesiredStatusRun   = "run"   // Allocation should run
+	AllocDesiredStatusStop  = "stop"  // Allocation should stop
+	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
+)
+
+const (
+	AllocClientStatusPending  = "pending"
+	AllocClientStatusRunning  = "running"
+	AllocClientStatusComplete = "complete"
+	AllocClientStatusFailed   = "failed"
+	AllocClientStatusLost     = "lost"
+)
+
+// Allocation is used to allocate the placement of a task group to a node.
+type Allocation struct {
+	// ID of the allocation (UUID)
+	ID string
+
+	// Namespace is the namespace the allocation is created in
+	Namespace string
+
+	// ID of the evaluation that generated this allocation
+	EvalID string
+
+	// Name is a logical name of the allocation.
+	Name string
+
+	// NodeID is the node this is being placed on
+	NodeID string
+
+	// Job is the parent job of the task group being allocated.
+	// This is copied at allocation time to avoid issues if the job
+	// definition is updated.
+	JobID string
+	Job   *Job
+
+	// TaskGroup is the name of the task group that should be run
+	TaskGroup string
+
+	// Resources is the total set of resources allocated as part
+	// of this allocation of the task group.
+	Resources *Resources
+
+	// SharedResources are the resources that are shared by all the tasks in an
+	// allocation
+	SharedResources *Resources
+
+	// TaskResources is the set of resources allocated to each
+	// task. These should sum to the total Resources.
+	TaskResources map[string]*Resources
+
+	// Metrics associated with this allocation
+	Metrics *AllocMetric
+
+	// Desired Status of the allocation on the client
+	DesiredStatus string
+
+	// DesiredDescription is meant to provide more human useful information
+	DesiredDescription string
+
+	// Status of the allocation on the client
+	ClientStatus string
+
+	// ClientDescription is meant to provide more human useful information
+	ClientDescription string
+
+	// TaskStates stores the state of each task.
+	TaskStates map[string]*TaskState
+
+	// PreviousAllocation is the allocation that this allocation is replacing
+	PreviousAllocation string
+
+	// DeploymentID identifies an allocation as being created from a
+	// particular deployment
+	DeploymentID string
+
+	// DeploymentStatus captures the status of the allocation as part of the
+	// given deployment
+	DeploymentStatus *AllocDeploymentStatus
+
+	// Raft Indexes
+	CreateIndex uint64
+	ModifyIndex uint64
+
+	// AllocModifyIndex is not updated when the client updates allocations. This
+	// lets the client pull only the allocs updated by the server.
+	AllocModifyIndex uint64
+
+	// CreateTime is the time the allocation has finished scheduling and been
+	// verified by the plan applier.
+	CreateTime int64
+}
+
+// Index returns the index of the allocation. If the allocation is from a task
+// group with count greater than 1, there will be multiple allocations for it.
+func (a *Allocation) Index() uint {
+	l := len(a.Name)
+	prefix := len(a.JobID) + len(a.TaskGroup) + 2
+	if l <= 3 || l <= prefix {
+		return uint(0)
+	}
+
+	strNum := a.Name[prefix : len(a.Name)-1]
+	num, _ := strconv.Atoi(strNum)
+	return uint(num)
+}
+
+func (a *Allocation) Copy() *Allocation {
+	return a.copyImpl(true)
+}
+
+// CopySkipJob provides a copy of the allocation but doesn't deep copy the job
+func (a *Allocation) CopySkipJob() *Allocation {
+	return a.copyImpl(false)
+}
+
+func (a *Allocation) copyImpl(job bool) *Allocation {
+	if a == nil {
+		return nil
+	}
+	na := new(Allocation)
+	*na = *a
+
+	if job {
+		na.Job = na.Job.Copy()
+	}
+
+	na.Resources = na.Resources.Copy()
+	na.SharedResources = na.SharedResources.Copy()
+
+	if a.TaskResources != nil {
+		tr := make(map[string]*Resources, len(na.TaskResources))
+		for task, resource := range na.TaskResources {
+			tr[task] = resource.Copy()
+		}
+		na.TaskResources = tr
+	}
+
+	na.Metrics = na.Metrics.Copy()
+	na.DeploymentStatus = na.DeploymentStatus.Copy()
+
+	if a.TaskStates != nil {
+		ts := make(map[string]*TaskState, len(na.TaskStates))
+		for task, state := range na.TaskStates {
+			ts[task] = state.Copy()
+		}
+		na.TaskStates = ts
+	}
+	return na
+}
+
+// TerminalStatus returns if the desired or actual status is terminal and
+// will no longer transition.
+func (a *Allocation) TerminalStatus() bool {
+	// First check the desired state and if that isn't terminal, check client
+	// state.
+	switch a.DesiredStatus {
+	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
+		return true
+	default:
+	}
+
+	switch a.ClientStatus {
+	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
+		return true
+	default:
+		return false
+	}
+}
+
+// Terminated returns if the allocation is in a terminal state on a client.
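+// For instance (illustrative):
+//
+//	(&Allocation{ClientStatus: AllocClientStatusFailed}).Terminated()  // true
+//	(&Allocation{ClientStatus: AllocClientStatusRunning}).Terminated() // false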
+func (a *Allocation) Terminated() bool {
+	if a.ClientStatus == AllocClientStatusFailed ||
+		a.ClientStatus == AllocClientStatusComplete ||
+		a.ClientStatus == AllocClientStatusLost {
+		return true
+	}
+	return false
+}
+
+// RanSuccessfully returns whether the client has run the allocation and all
+// tasks finished successfully
+func (a *Allocation) RanSuccessfully() bool {
+	return a.ClientStatus == AllocClientStatusComplete
+}
+
+// ShouldMigrate returns if the allocation needs data migration
+func (a *Allocation) ShouldMigrate() bool {
+	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
+		return false
+	}
+
+	tg := a.Job.LookupTaskGroup(a.TaskGroup)
+
+	// if the task group is nil or the ephemeral disk block isn't present then
+	// we won't migrate
+	if tg == nil || tg.EphemeralDisk == nil {
+		return false
+	}
+
+	// We won't migrate any data if the user hasn't enabled migration or the
+	// disk is not marked as sticky
+	if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky {
+		return false
+	}
+
+	return true
+}
+
+// Stub returns a list stub for the allocation
+func (a *Allocation) Stub() *AllocListStub {
+	return &AllocListStub{
+		ID:                 a.ID,
+		EvalID:             a.EvalID,
+		Name:               a.Name,
+		NodeID:             a.NodeID,
+		JobID:              a.JobID,
+		JobVersion:         a.Job.Version,
+		TaskGroup:          a.TaskGroup,
+		DesiredStatus:      a.DesiredStatus,
+		DesiredDescription: a.DesiredDescription,
+		ClientStatus:       a.ClientStatus,
+		ClientDescription:  a.ClientDescription,
+		TaskStates:         a.TaskStates,
+		DeploymentStatus:   a.DeploymentStatus,
+		CreateIndex:        a.CreateIndex,
+		ModifyIndex:        a.ModifyIndex,
+		CreateTime:         a.CreateTime,
+	}
+}
+
+// AllocListStub is used to return a subset of alloc information
+type AllocListStub struct {
+	ID                 string
+	EvalID             string
+	Name               string
+	NodeID             string
+	JobID              string
+	JobVersion         uint64
+	TaskGroup          string
+	DesiredStatus      string
+	DesiredDescription string
+	ClientStatus       string
+	ClientDescription  string
+	TaskStates         map[string]*TaskState
+	DeploymentStatus   *AllocDeploymentStatus
+	CreateIndex        uint64
+	ModifyIndex        uint64
+	CreateTime         int64
+}
+
+// AllocMetric is used to track various metrics while attempting
+// to make an allocation. These are used to debug a job, or to better
+// understand the pressure within the system.
+type AllocMetric struct {
+	// NodesEvaluated is the number of nodes that were evaluated
+	NodesEvaluated int
+
+	// NodesFiltered is the number of nodes filtered due to a constraint
+	NodesFiltered int
+
+	// NodesAvailable is the number of nodes available for evaluation per DC.
+	NodesAvailable map[string]int
+
+	// ClassFiltered is the number of nodes filtered by class
+	ClassFiltered map[string]int
+
+	// ConstraintFiltered is the number of failures caused by constraint
+	ConstraintFiltered map[string]int
+
+	// NodesExhausted is the number of nodes skipped due to being
+	// exhausted of at least one resource
+	NodesExhausted int
+
+	// ClassExhausted is the number of nodes exhausted by class
+	ClassExhausted map[string]int
+
+	// DimensionExhausted provides the count by dimension or reason
+	DimensionExhausted map[string]int
+
+	// Scores is the scores of the final few nodes remaining
+	// for placement. The top score is typically selected.
+	Scores map[string]float64
+
+	// AllocationTime is a measure of how long the allocation
+	// attempt took. This can affect performance and SLAs.
+	AllocationTime time.Duration
+
+	// CoalescedFailures indicates the number of other
+	// allocations that were coalesced into this failed allocation.
+// AllocMetric is used to track various metrics while attempting
+// to make an allocation. These are used to debug a job, or to better
+// understand the pressure within the system.
+type AllocMetric struct {
+	// NodesEvaluated is the number of nodes that were evaluated
+	NodesEvaluated int
+
+	// NodesFiltered is the number of nodes filtered due to a constraint
+	NodesFiltered int
+
+	// NodesAvailable is the number of nodes available for evaluation per DC.
+	NodesAvailable map[string]int
+
+	// ClassFiltered is the number of nodes filtered by class
+	ClassFiltered map[string]int
+
+	// ConstraintFiltered is the number of failures caused by constraint
+	ConstraintFiltered map[string]int
+
+	// NodesExhausted is the number of nodes skipped due to being
+	// exhausted of at least one resource
+	NodesExhausted int
+
+	// ClassExhausted is the number of nodes exhausted by class
+	ClassExhausted map[string]int
+
+	// DimensionExhausted provides the count by dimension or reason
+	DimensionExhausted map[string]int
+
+	// Scores is the scores of the final few nodes remaining
+	// for placement. The top score is typically selected.
+	Scores map[string]float64
+
+	// AllocationTime is a measure of how long the allocation
+	// attempt took. This can affect performance and SLAs.
+	AllocationTime time.Duration
+
+	// CoalescedFailures indicates the number of other
+	// allocations that were coalesced into this failed allocation.
+	// This is to prevent creating many failed allocations for a
+	// single task group.
+	CoalescedFailures int
+}
+
+func (a *AllocMetric) Copy() *AllocMetric {
+	if a == nil {
+		return nil
+	}
+	na := new(AllocMetric)
+	*na = *a
+	na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable)
+	na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered)
+	na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered)
+	na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted)
+	na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted)
+	na.Scores = helper.CopyMapStringFloat64(na.Scores)
+	return na
+}
+
+func (a *AllocMetric) EvaluateNode() {
+	a.NodesEvaluated += 1
+}
+
+func (a *AllocMetric) FilterNode(node *Node, constraint string) {
+	a.NodesFiltered += 1
+	if node != nil && node.NodeClass != "" {
+		if a.ClassFiltered == nil {
+			a.ClassFiltered = make(map[string]int)
+		}
+		a.ClassFiltered[node.NodeClass] += 1
+	}
+	if constraint != "" {
+		if a.ConstraintFiltered == nil {
+			a.ConstraintFiltered = make(map[string]int)
+		}
+		a.ConstraintFiltered[constraint] += 1
+	}
+}
+
+func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
+	a.NodesExhausted += 1
+	if node != nil && node.NodeClass != "" {
+		if a.ClassExhausted == nil {
+			a.ClassExhausted = make(map[string]int)
+		}
+		a.ClassExhausted[node.NodeClass] += 1
+	}
+	if dimension != "" {
+		if a.DimensionExhausted == nil {
+			a.DimensionExhausted = make(map[string]int)
+		}
+		a.DimensionExhausted[dimension] += 1
+	}
+}
+
+func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
+	if a.Scores == nil {
+		a.Scores = make(map[string]float64)
+	}
+	key := fmt.Sprintf("%s.%s", node.ID, name)
+	a.Scores[key] = score
+}
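A short sketch of how a scheduler loop might feed these counters while ranking candidate nodes; `rankNodes`, `failsConstraint`, and `scoreFor` are hypothetical stand-ins for the real constraint and ranking logic, shown only to illustrate the intended call pattern:

// rankNodes records scheduling metrics while scoring candidates.
// failsConstraint and scoreFor are hypothetical helpers.
func rankNodes(m *AllocMetric, nodes []*Node) {
	for _, n := range nodes {
		m.EvaluateNode()
		if c := failsConstraint(n); c != "" {
			m.FilterNode(n, c) // also bumps the per-class counter
			continue
		}
		m.ScoreNode(n, "binpack", scoreFor(n))
	}
}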
+// AllocDeploymentStatus captures the status of the allocation as part of the
+// deployment. This can include things like if the allocation has been marked as
+// healthy.
+type AllocDeploymentStatus struct {
+	// Healthy marks whether the allocation has been marked healthy or unhealthy
+	// as part of a deployment. It can be unset if it has neither been marked
+	// healthy nor unhealthy.
+	Healthy *bool
+
+	// ModifyIndex is the raft index in which the deployment status was last
+	// changed.
+	ModifyIndex uint64
+}
+
+// IsHealthy returns if the allocation is marked as healthy as part of a
+// deployment
+func (a *AllocDeploymentStatus) IsHealthy() bool {
+	if a == nil {
+		return false
+	}
+
+	return a.Healthy != nil && *a.Healthy
+}
+
+// IsUnhealthy returns if the allocation is marked as unhealthy as part of a
+// deployment
+func (a *AllocDeploymentStatus) IsUnhealthy() bool {
+	if a == nil {
+		return false
+	}
+
+	return a.Healthy != nil && !*a.Healthy
+}
+
+func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
+	if a == nil {
+		return nil
+	}
+
+	c := new(AllocDeploymentStatus)
+	*c = *a
+
+	if a.Healthy != nil {
+		c.Healthy = helper.BoolToPtr(*a.Healthy)
+	}
+
+	return c
+}
+
+const (
+	EvalStatusBlocked   = "blocked"
+	EvalStatusPending   = "pending"
+	EvalStatusComplete  = "complete"
+	EvalStatusFailed    = "failed"
+	EvalStatusCancelled = "canceled"
+)
+
+const (
+	EvalTriggerJobRegister       = "job-register"
+	EvalTriggerJobDeregister     = "job-deregister"
+	EvalTriggerPeriodicJob       = "periodic-job"
+	EvalTriggerNodeUpdate        = "node-update"
+	EvalTriggerScheduled         = "scheduled"
+	EvalTriggerRollingUpdate     = "rolling-update"
+	EvalTriggerDeploymentWatcher = "deployment-watcher"
+	EvalTriggerFailedFollowUp    = "failed-follow-up"
+	EvalTriggerMaxPlans          = "max-plan-attempts"
+)
+
+const (
+	// CoreJobEvalGC is used for the garbage collection of evaluations
+	// and allocations. We periodically scan evaluations in a terminal state,
+	// in which all the corresponding allocations are also terminal. We
+	// delete these out of the system to bound the state.
+	CoreJobEvalGC = "eval-gc"
+
+	// CoreJobNodeGC is used for the garbage collection of failed nodes.
+	// We periodically scan nodes in a terminal state, and if they have no
+	// corresponding allocations we delete these out of the system.
+	CoreJobNodeGC = "node-gc"
+
+	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
+	// periodically scan garbage collectible jobs and check if both their
+	// evaluations and allocations are terminal. If so, we delete these out of
+	// the system.
+	CoreJobJobGC = "job-gc"
+
+	// CoreJobDeploymentGC is used for the garbage collection of eligible
+	// deployments. We periodically scan garbage collectible deployments and
+	// check if they are terminal. If so, we delete these out of the system.
+	CoreJobDeploymentGC = "deployment-gc"
+
+	// CoreJobForceGC is used to force garbage collection of all GCable objects.
+	CoreJobForceGC = "force-gc"
+)
+
+// Evaluation is used anytime we need to apply business logic as a result
+// of a change to our desired state (job specification) or the emergent state
+// (registered nodes). When the inputs change, we need to "evaluate" them,
+// potentially taking action (allocation of work) or doing nothing if the state
+// of the world does not require it.
+type Evaluation struct {
+	// ID is a randomly generated UUID used for this evaluation. This
+	// is assigned upon the creation of the evaluation.
+	ID string
+
+	// Namespace is the namespace the evaluation is created in
+	Namespace string
+
+	// Priority is used to control scheduling importance and if this job
+	// can preempt other jobs.
+	Priority int
+
+	// Type is used to control which schedulers are available to handle
+	// this evaluation.
+	Type string
+
+	// TriggeredBy is used to give some insight into why this Eval
+	// was created. (Job change, node failure, alloc failure, etc).
+	TriggeredBy string
+
+	// JobID is the job this evaluation is scoped to.
Evaluations cannot + // be run in parallel for a given JobID, so we serialize on this. + JobID string + + // JobModifyIndex is the modify index of the job at the time + // the evaluation was created + JobModifyIndex uint64 + + // NodeID is the node that was affected triggering the evaluation. + NodeID string + + // NodeModifyIndex is the modify index of the node at the time + // the evaluation was created + NodeModifyIndex uint64 + + // DeploymentID is the ID of the deployment that triggered the evaluation. + DeploymentID string + + // Status of the evaluation + Status string + + // StatusDescription is meant to provide more human useful information + StatusDescription string + + // Wait is a minimum wait time for running the eval. This is used to + // support a rolling upgrade. + Wait time.Duration + + // NextEval is the evaluation ID for the eval created to do a followup. + // This is used to support rolling upgrades, where we need a chain of evaluations. + NextEval string + + // PreviousEval is the evaluation ID for the eval creating this one to do a followup. + // This is used to support rolling upgrades, where we need a chain of evaluations. + PreviousEval string + + // BlockedEval is the evaluation ID for a created blocked eval. A + // blocked eval will be created if all allocations could not be placed due + // to constraints or lacking resources. + BlockedEval string + + // FailedTGAllocs are task groups which have allocations that could not be + // made, but the metrics are persisted so that the user can use the feedback + // to determine the cause. + FailedTGAllocs map[string]*AllocMetric + + // ClassEligibility tracks computed node classes that have been explicitly + // marked as eligible or ineligible. + ClassEligibility map[string]bool + + // EscapedComputedClass marks whether the job has constraints that are not + // captured by computed node classes. + EscapedComputedClass bool + + // AnnotatePlan triggers the scheduler to provide additional annotations + // during the evaluation. This should not be set during normal operations. + AnnotatePlan bool + + // QueuedAllocations is the number of unplaced allocations at the time the + // evaluation was processed. The map is keyed by Task Group names. + QueuedAllocations map[string]int + + // SnapshotIndex is the Raft index of the snapshot used to process the + // evaluation. As such it will only be set once it has gone through the + // scheduler. + SnapshotIndex uint64 + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// TerminalStatus returns if the current status is terminal and +// will no longer transition. 
+func (e *Evaluation) TerminalStatus() bool {
+	switch e.Status {
+	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *Evaluation) GoString() string {
+	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
+}
+
+func (e *Evaluation) Copy() *Evaluation {
+	if e == nil {
+		return nil
+	}
+	ne := new(Evaluation)
+	*ne = *e
+
+	// Copy ClassEligibility
+	if e.ClassEligibility != nil {
+		classes := make(map[string]bool, len(e.ClassEligibility))
+		for class, elig := range e.ClassEligibility {
+			classes[class] = elig
+		}
+		ne.ClassEligibility = classes
+	}
+
+	// Copy FailedTGAllocs
+	if e.FailedTGAllocs != nil {
+		failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
+		for tg, metric := range e.FailedTGAllocs {
+			failedTGs[tg] = metric.Copy()
+		}
+		ne.FailedTGAllocs = failedTGs
+	}
+
+	// Copy queued allocations
+	if e.QueuedAllocations != nil {
+		queuedAllocations := make(map[string]int, len(e.QueuedAllocations))
+		for tg, num := range e.QueuedAllocations {
+			queuedAllocations[tg] = num
+		}
+		ne.QueuedAllocations = queuedAllocations
+	}
+
+	return ne
+}
+
+// ShouldEnqueue checks if a given evaluation should be enqueued into the
+// eval_broker
+func (e *Evaluation) ShouldEnqueue() bool {
+	switch e.Status {
+	case EvalStatusPending:
+		return true
+	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
+		return false
+	default:
+		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
+	}
+}
+
+// ShouldBlock checks if a given evaluation should be entered into the blocked
+// eval tracker.
+func (e *Evaluation) ShouldBlock() bool {
+	switch e.Status {
+	case EvalStatusBlocked:
+		return true
+	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
+		return false
+	default:
+		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
+	}
+}
+
+// MakePlan is used to make a plan from the given evaluation
+// for a given Job
+func (e *Evaluation) MakePlan(j *Job) *Plan {
+	p := &Plan{
+		EvalID:         e.ID,
+		Priority:       e.Priority,
+		Job:            j,
+		NodeUpdate:     make(map[string][]*Allocation),
+		NodeAllocation: make(map[string][]*Allocation),
+	}
+	if j != nil {
+		p.AllAtOnce = j.AllAtOnce
+	}
+	return p
+}
+
+// NextRollingEval creates an evaluation to follow up this eval for rolling updates
+func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
+	return &Evaluation{
+		ID:             GenerateUUID(),
+		Namespace:      e.Namespace,
+		Priority:       e.Priority,
+		Type:           e.Type,
+		TriggeredBy:    EvalTriggerRollingUpdate,
+		JobID:          e.JobID,
+		JobModifyIndex: e.JobModifyIndex,
+		Status:         EvalStatusPending,
+		Wait:           wait,
+		PreviousEval:   e.ID,
+	}
+}
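For illustration, a minimal sketch of how rolling updates chain evaluations together via these constructors; `nextInChain` is a hypothetical wrapper, not part of the vendored file:

// nextInChain creates the follow-up evaluation for a rolling update.
// The new eval waits for the stagger period and records its parent
// via PreviousEval, forming the chain described above.
func nextInChain(e *Evaluation, stagger time.Duration) *Evaluation {
	next := e.NextRollingEval(stagger)
	// next.Status == EvalStatusPending and next.PreviousEval == e.ID
	return next
}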
+// CreateBlockedEval creates a blocked evaluation to follow up this eval to place any
+// failed allocations. It takes the classes marked explicitly eligible or
+// ineligible and whether the job has escaped computed node classes.
+func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation {
+	return &Evaluation{
+		ID:                   GenerateUUID(),
+		Namespace:            e.Namespace,
+		Priority:             e.Priority,
+		Type:                 e.Type,
+		TriggeredBy:          e.TriggeredBy,
+		JobID:                e.JobID,
+		JobModifyIndex:       e.JobModifyIndex,
+		Status:               EvalStatusBlocked,
+		PreviousEval:         e.ID,
+		ClassEligibility:     classEligibility,
+		EscapedComputedClass: escaped,
+	}
+}
+
+// CreateFailedFollowUpEval creates a follow up evaluation when the current one
+// has been marked as failed because it has hit the delivery limit and will not
+// be retried by the eval_broker.
+func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
+	return &Evaluation{
+		ID:             GenerateUUID(),
+		Namespace:      e.Namespace,
+		Priority:       e.Priority,
+		Type:           e.Type,
+		TriggeredBy:    EvalTriggerFailedFollowUp,
+		JobID:          e.JobID,
+		JobModifyIndex: e.JobModifyIndex,
+		Status:         EvalStatusPending,
+		Wait:           wait,
+		PreviousEval:   e.ID,
+	}
+}
+
+// Plan is used to submit a commit plan for task allocations. These
+// are submitted to the leader which verifies that resources have
+// not been overcommitted before admitting the plan.
+type Plan struct {
+	// EvalID is the evaluation ID this plan is associated with
+	EvalID string
+
+	// EvalToken is used to prevent a split-brain processing of
+	// an evaluation. There should only be a single scheduler running
+	// an Eval at a time, but this could be violated after a leadership
+	// transition. This unique token is used to reject plans that are
+	// being submitted from a different leader.
+	EvalToken string
+
+	// Priority is the priority of the upstream job
+	Priority int
+
+	// AllAtOnce is used to control if incremental scheduling of task groups
+	// is allowed or if we must do a gang scheduling of the entire job.
+	// If this is false, a plan may be partially applied. Otherwise, the
+	// entire plan must be able to make progress.
+	AllAtOnce bool
+
+	// Job is the parent job of all the allocations in the Plan.
+	// Since a Plan only involves a single Job, we can reduce the size
+	// of the plan by only including it once.
+	Job *Job
+
+	// NodeUpdate contains all the allocations for each node. For each node,
+	// this is a list of the allocations to update to either stop or evict.
+	NodeUpdate map[string][]*Allocation
+
+	// NodeAllocation contains all the allocations for each node.
+	// The evicts must be considered prior to the allocations.
+	NodeAllocation map[string][]*Allocation
+
+	// Annotations contains annotations by the scheduler to be used by operators
+	// to understand the decisions made by the scheduler.
+	Annotations *PlanAnnotations
+
+	// Deployment is the deployment created or updated by the scheduler that
+	// should be applied by the planner.
+	Deployment *Deployment
+
+	// DeploymentUpdates is a set of status updates to apply to the given
+	// deployments. This allows the scheduler to cancel any unneeded deployment
+	// because the job is stopped or the update block is removed.
+	DeploymentUpdates []*DeploymentStatusUpdate
+}
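A sketch of how a scheduler assembles a plan from an evaluation using the helpers below; `buildPlan` and its inputs (`job`, `placed`, `stale`) are assumed names for illustration:

// buildPlan queues one placement and one stop, then drops empty plans.
// The inputs are hypothetical values produced by the scheduler.
func buildPlan(e *Evaluation, job *Job, placed, stale *Allocation) *Plan {
	p := e.MakePlan(job)
	p.AppendAlloc(placed) // queue a placement on placed.NodeID
	p.AppendUpdate(stale, AllocDesiredStatusStop, "node drained", "")
	if p.IsNoOp() {
		return nil // nothing worth submitting to the leader
	}
	return p
}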
+// AppendUpdate marks the allocation for eviction. The clientStatus of the
+// allocation may be optionally set by passing in a non-empty value.
+func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) {
+	newAlloc := new(Allocation)
+	*newAlloc = *alloc
+
+	// If the job is not set in the plan we are deregistering a job so we
+	// extract the job from the allocation.
+	if p.Job == nil && newAlloc.Job != nil {
+		p.Job = newAlloc.Job
+	}
+
+	// Normalize the job
+	newAlloc.Job = nil
+
+	// Strip the resources as they can be rebuilt.
+	newAlloc.Resources = nil
+
+	newAlloc.DesiredStatus = desiredStatus
+	newAlloc.DesiredDescription = desiredDesc
+
+	if clientStatus != "" {
+		newAlloc.ClientStatus = clientStatus
+	}
+
+	node := alloc.NodeID
+	existing := p.NodeUpdate[node]
+	p.NodeUpdate[node] = append(existing, newAlloc)
+}
+
+func (p *Plan) PopUpdate(alloc *Allocation) {
+	existing := p.NodeUpdate[alloc.NodeID]
+	n := len(existing)
+	if n > 0 && existing[n-1].ID == alloc.ID {
+		existing = existing[:n-1]
+		if len(existing) > 0 {
+			p.NodeUpdate[alloc.NodeID] = existing
+		} else {
+			delete(p.NodeUpdate, alloc.NodeID)
+		}
+	}
+}
+
+func (p *Plan) AppendAlloc(alloc *Allocation) {
+	node := alloc.NodeID
+	existing := p.NodeAllocation[node]
+	p.NodeAllocation[node] = append(existing, alloc)
+}
+
+// IsNoOp checks if this plan would do nothing
+func (p *Plan) IsNoOp() bool {
+	return len(p.NodeUpdate) == 0 &&
+		len(p.NodeAllocation) == 0 &&
+		p.Deployment == nil &&
+		len(p.DeploymentUpdates) == 0
+}
+
+// PlanResult is the result of a plan submitted to the leader.
+type PlanResult struct {
+	// NodeUpdate contains all the updates that were committed.
+	NodeUpdate map[string][]*Allocation
+
+	// NodeAllocation contains all the allocations that were committed.
+	NodeAllocation map[string][]*Allocation
+
+	// Deployment is the deployment that was committed.
+	Deployment *Deployment
+
+	// DeploymentUpdates is the set of deployment updates that were committed.
+	DeploymentUpdates []*DeploymentStatusUpdate
+
+	// RefreshIndex is the index the worker should refresh state up to.
+	// This allows all evictions and allocations to be materialized.
+	// If any allocations were rejected due to stale data (node state,
+	// over committed) this can be used to force a worker refresh.
+	RefreshIndex uint64
+
+	// AllocIndex is the Raft index in which the evictions and
+	// allocations took place. This is used for the write index.
+	AllocIndex uint64
+}
+
+// IsNoOp checks if this plan result would do nothing
+func (p *PlanResult) IsNoOp() bool {
+	return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0 &&
+		len(p.DeploymentUpdates) == 0 && p.Deployment == nil
+}
+
+// FullCommit is used to check if all the allocations in a plan
+// were committed as part of the result. Returns if there was
+// a match, and the number of expected and actual allocations.
+func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
+	expected := 0
+	actual := 0
+	for name, allocList := range plan.NodeAllocation {
+		didAlloc := p.NodeAllocation[name]
+		expected += len(allocList)
+		actual += len(didAlloc)
+	}
+	return actual == expected, expected, actual
+}
+
+// PlanAnnotations holds annotations made by the scheduler to give further debug
+// information to operators.
+type PlanAnnotations struct {
+	// DesiredTGUpdates is the set of desired updates per task group.
+	DesiredTGUpdates map[string]*DesiredUpdates
+}
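A brief sketch of checking the leader's response against the submitted plan with FullCommit; `verifyCommit` and `result` are illustrative names:

// verifyCommit rejects partially committed plans so the worker can
// refresh state (up to result.RefreshIndex) and replan.
func verifyCommit(p *Plan, result *PlanResult) error {
	if ok, expected, actual := result.FullCommit(p); !ok {
		return fmt.Errorf("partial commit: placed %d of %d allocations", actual, expected)
	}
	return nil
}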
+// DesiredUpdates is the set of changes the scheduler would like to make given
+// sufficient resources and cluster capacity.
+type DesiredUpdates struct {
+	Ignore            uint64
+	Place             uint64
+	Migrate           uint64
+	Stop              uint64
+	InPlaceUpdate     uint64
+	DestructiveUpdate uint64
+	Canary            uint64
+}
+
+func (d *DesiredUpdates) GoString() string {
+	return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)",
+		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
+}
+
+// MsgpackHandle is a shared handle for encoding/decoding of structs
+var MsgpackHandle = func() *codec.MsgpackHandle {
+	h := &codec.MsgpackHandle{RawToString: true}
+
+	// Sets the default type for decoding a map into a nil interface{}.
+	// This is necessary in particular because we store the driver configs as a
+	// nil interface{}.
+	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
+	return h
+}()
+
+var (
+	// JsonHandle and JsonHandlePretty are the codec handles to JSON encode
+	// structs. The pretty handle will add indents for easier human consumption.
+	JsonHandle = &codec.JsonHandle{
+		HTMLCharsAsIs: true,
+	}
+	JsonHandlePretty = &codec.JsonHandle{
+		HTMLCharsAsIs: true,
+		Indent:        4,
+	}
+)
+
+var HashiMsgpackHandle = func() *hcodec.MsgpackHandle {
+	h := &hcodec.MsgpackHandle{RawToString: true}
+
+	// Sets the default type for decoding a map into a nil interface{}.
+	// This is necessary in particular because we store the driver configs as a
+	// nil interface{}.
+	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
+	return h
+}()
+
+// Decode is used to decode a MsgPack encoded object
+func Decode(buf []byte, out interface{}) error {
+	return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out)
+}
+
+// Encode is used to encode a MsgPack object with type prefix
+func Encode(t MessageType, msg interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	buf.WriteByte(uint8(t))
+	err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg)
+	return buf.Bytes(), err
+}
+
+// KeyringResponse is a unified key response and can be used for install,
+// remove, use, as well as listing key queries.
+type KeyringResponse struct {
+	Messages map[string]string
+	Keys     map[string]int
+	NumNodes int
+}
+
+// KeyringRequest is the request object for serf key operations.
+type KeyringRequest struct {
+	Key string
+}
+
+// RecoverableError wraps an error and marks whether it is recoverable and
+// could be retried, or is fatal.
+type RecoverableError struct {
+	Err         string
+	Recoverable bool
+}
+
+// NewRecoverableError is used to wrap an error and mark it as recoverable or
+// not.
+func NewRecoverableError(e error, recoverable bool) error {
+	if e == nil {
+		return nil
+	}
+
+	return &RecoverableError{
+		Err:         e.Error(),
+		Recoverable: recoverable,
+	}
+}
+
+// WrapRecoverable wraps an existing error in a new RecoverableError with a new
+// message. If the error was recoverable before, the returned error is as well;
+// otherwise it is unrecoverable.
+func WrapRecoverable(msg string, err error) error {
+	return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)}
+}
+
+func (r *RecoverableError) Error() string {
+	return r.Err
+}
+
+func (r *RecoverableError) IsRecoverable() bool {
+	return r.Recoverable
+}
+
+// Recoverable is an interface for errors to implement to indicate whether or
+// not they are fatal or recoverable.
+type Recoverable interface {
+	error
+	IsRecoverable() bool
+}
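For illustration, a minimal retry loop driven by the Recoverable contract; `withRetries` and `op` are hypothetical, standing in for any operation that wraps transient failures with NewRecoverableError(err, true):

// withRetries retries op only while its failures are recoverable.
// op is a hypothetical operation, not part of the vendored file.
func withRetries(op func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil || !IsRecoverable(err) {
			return err // success, or a fatal error: stop retrying
		}
	}
	return err
}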
+// IsRecoverable returns true if error is a RecoverableError with
+// Recoverable=true. Otherwise false is returned.
+func IsRecoverable(e error) bool {
+	if re, ok := e.(Recoverable); ok {
+		return re.IsRecoverable()
+	}
+	return false
+}
+
+// ACLPolicy is used to represent an ACL policy
+type ACLPolicy struct {
+	Name        string // Unique name
+	Description string // Human readable
+	Rules       string // HCL or JSON format
+	Hash        []byte
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// SetHash is used to compute and set the hash of the ACL policy
+func (c *ACLPolicy) SetHash() []byte {
+	// Initialize a 256bit Blake2 hash (32 bytes)
+	hash, err := blake2b.New256(nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write all the user set fields
+	hash.Write([]byte(c.Name))
+	hash.Write([]byte(c.Description))
+	hash.Write([]byte(c.Rules))
+
+	// Finalize the hash
+	hashVal := hash.Sum(nil)
+
+	// Set and return the hash
+	c.Hash = hashVal
+	return hashVal
+}
+
+func (a *ACLPolicy) Stub() *ACLPolicyListStub {
+	return &ACLPolicyListStub{
+		Name:        a.Name,
+		Description: a.Description,
+		Hash:        a.Hash,
+		CreateIndex: a.CreateIndex,
+		ModifyIndex: a.ModifyIndex,
+	}
+}
+
+func (a *ACLPolicy) Validate() error {
+	var mErr multierror.Error
+	if !validPolicyName.MatchString(a.Name) {
+		err := fmt.Errorf("invalid name '%s'", a.Name)
+		mErr.Errors = append(mErr.Errors, err)
+	}
+	if _, err := acl.Parse(a.Rules); err != nil {
+		err = fmt.Errorf("failed to parse rules: %v", err)
+		mErr.Errors = append(mErr.Errors, err)
+	}
+	if len(a.Description) > maxPolicyDescriptionLength {
+		err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength)
+		mErr.Errors = append(mErr.Errors, err)
+	}
+	return mErr.ErrorOrNil()
+}
+
+// ACLPolicyListStub is used for listing ACL policies
+type ACLPolicyListStub struct {
+	Name        string
+	Description string
+	Hash        []byte
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// ACLPolicyListRequest is used to request a list of policies
+type ACLPolicyListRequest struct {
+	QueryOptions
+}
+
+// ACLPolicySpecificRequest is used to query a specific policy
+type ACLPolicySpecificRequest struct {
+	Name string
+	QueryOptions
+}
+
+// ACLPolicySetRequest is used to query a set of policies
+type ACLPolicySetRequest struct {
+	Names []string
+	QueryOptions
+}
+
+// ACLPolicyListResponse is used for a list request
+type ACLPolicyListResponse struct {
+	Policies []*ACLPolicyListStub
+	QueryMeta
+}
+
+// SingleACLPolicyResponse is used to return a single policy
+type SingleACLPolicyResponse struct {
+	Policy *ACLPolicy
+	QueryMeta
+}
+
+// ACLPolicySetResponse is used to return a set of policies
+type ACLPolicySetResponse struct {
+	Policies map[string]*ACLPolicy
+	QueryMeta
+}
+
+// ACLPolicyDeleteRequest is used to delete a set of policies
+type ACLPolicyDeleteRequest struct {
+	Names []string
+	WriteRequest
+}
+
+// ACLPolicyUpsertRequest is used to upsert a set of policies
+type ACLPolicyUpsertRequest struct {
+	Policies []*ACLPolicy
+	WriteRequest
+}
+
+// ACLToken represents a client token which is used to authenticate
+type ACLToken struct {
+	AccessorID  string    // Public Accessor ID (UUID)
+	SecretID    string    // Secret ID, private (UUID)
+	Name        string    // Human friendly name
+	Type        string    // Client or Management
+	Policies    []string  // Policies this token ties to
+	Global      bool      // Global or Region local
+	Hash        []byte
+	CreateTime  time.Time // Time of creation
+	CreateIndex uint64
+	ModifyIndex uint64
+}
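A short sketch of constructing and sanity-checking a client token with the methods defined below; `newDeployToken` and the field values are illustrative, and AccessorID/SecretID assignment by the server is omitted:

// newDeployToken builds a client token, hashes its user-set fields,
// and validates it (a client token with no policies would fail).
func newDeployToken() (*ACLToken, error) {
	tok := &ACLToken{
		Name:     "ci-deployer", // hypothetical name
		Type:     ACLClientToken,
		Policies: []string{"deploy"}, // hypothetical policy
	}
	tok.SetHash()
	if err := tok.Validate(); err != nil {
		return nil, err
	}
	return tok, nil
}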
+var (
+	// AnonymousACLToken is used when no SecretID is provided, and the
+	// request is made anonymously.
+	AnonymousACLToken = &ACLToken{
+		AccessorID: "anonymous",
+		Name:       "Anonymous Token",
+		Type:       ACLClientToken,
+		Policies:   []string{"anonymous"},
+		Global:     false,
+	}
+)
+
+type ACLTokenListStub struct {
+	AccessorID  string
+	Name        string
+	Type        string
+	Policies    []string
+	Global      bool
+	Hash        []byte
+	CreateTime  time.Time
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// SetHash is used to compute and set the hash of the ACL token
+func (a *ACLToken) SetHash() []byte {
+	// Initialize a 256bit Blake2 hash (32 bytes)
+	hash, err := blake2b.New256(nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write all the user set fields
+	hash.Write([]byte(a.Name))
+	hash.Write([]byte(a.Type))
+	for _, policyName := range a.Policies {
+		hash.Write([]byte(policyName))
+	}
+	if a.Global {
+		hash.Write([]byte("global"))
+	} else {
+		hash.Write([]byte("local"))
+	}
+
+	// Finalize the hash
+	hashVal := hash.Sum(nil)
+
+	// Set and return the hash
+	a.Hash = hashVal
+	return hashVal
+}
+
+func (a *ACLToken) Stub() *ACLTokenListStub {
+	return &ACLTokenListStub{
+		AccessorID:  a.AccessorID,
+		Name:        a.Name,
+		Type:        a.Type,
+		Policies:    a.Policies,
+		Global:      a.Global,
+		Hash:        a.Hash,
+		CreateTime:  a.CreateTime,
+		CreateIndex: a.CreateIndex,
+		ModifyIndex: a.ModifyIndex,
+	}
+}
+
+// Validate is used to sanity check a token
+func (a *ACLToken) Validate() error {
+	var mErr multierror.Error
+	if len(a.Name) > maxTokenNameLength {
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long"))
+	}
+	switch a.Type {
+	case ACLClientToken:
+		if len(a.Policies) == 0 {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies"))
+		}
+	case ACLManagementToken:
+		if len(a.Policies) != 0 {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies"))
+		}
+	default:
+		mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management"))
+	}
+	return mErr.ErrorOrNil()
+}
+
+// PolicySubset checks if a given set of policies is a subset of the token's policies
+func (a *ACLToken) PolicySubset(policies []string) bool {
+	// Hot-path the management tokens, superset of all policies.
+ if a.Type == ACLManagementToken { + return true + } + associatedPolicies := make(map[string]struct{}, len(a.Policies)) + for _, policy := range a.Policies { + associatedPolicies[policy] = struct{}{} + } + for _, policy := range policies { + if _, ok := associatedPolicies[policy]; !ok { + return false + } + } + return true +} + +// ACLTokenListRequest is used to request a list of tokens +type ACLTokenListRequest struct { + GlobalOnly bool + QueryOptions +} + +// ACLTokenSpecificRequest is used to query a specific token +type ACLTokenSpecificRequest struct { + AccessorID string + QueryOptions +} + +// ACLTokenSetRequest is used to query a set of tokens +type ACLTokenSetRequest struct { + AccessorIDS []string + QueryOptions +} + +// ACLTokenListResponse is used for a list request +type ACLTokenListResponse struct { + Tokens []*ACLTokenListStub + QueryMeta +} + +// SingleACLTokenResponse is used to return a single token +type SingleACLTokenResponse struct { + Token *ACLToken + QueryMeta +} + +// ACLTokenSetResponse is used to return a set of token +type ACLTokenSetResponse struct { + Tokens map[string]*ACLToken // Keyed by Accessor ID + QueryMeta +} + +// ResolveACLTokenRequest is used to resolve a specific token +type ResolveACLTokenRequest struct { + SecretID string + QueryOptions +} + +// ResolveACLTokenResponse is used to resolve a single token +type ResolveACLTokenResponse struct { + Token *ACLToken + QueryMeta +} + +// ACLTokenDeleteRequest is used to delete a set of tokens +type ACLTokenDeleteRequest struct { + AccessorIDs []string + WriteRequest +} + +// ACLTokenBootstrapRequest is used to bootstrap ACLs +type ACLTokenBootstrapRequest struct { + Token *ACLToken // Not client specifiable + ResetIndex uint64 // Reset index is used to clear the bootstrap token + WriteRequest +} + +// ACLTokenUpsertRequest is used to upsert a set of tokens +type ACLTokenUpsertRequest struct { + Tokens []*ACLToken + WriteRequest +} + +// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest +type ACLTokenUpsertResponse struct { + Tokens []*ACLToken + WriteMeta +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go new file mode 100644 index 000000000..bdc324bb5 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go @@ -0,0 +1,3 @@ +package structs + +//go:generate codecgen -d 100 -o structs.generated.go structs.go diff --git a/vendor/vendor.json b/vendor/vendor.json index ce23fa219..2551f9e61 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -990,6 +990,12 @@ "revision": "317e0006254c44a0ac427cc52a0e083ff0b9622f", "revisionTime": "2017-09-15T02:47:31Z" }, + { + "checksumSHA1": "5DBIm/bJOKLR3CbQH6wIELQDLlQ=", + "path": "github.com/gorhill/cronexpr", + "revision": "d520615e531a6bf3fb69406b9eba718261285ec8", + "revisionTime": "2016-12-05T14:13:22Z" + }, { "checksumSHA1": "O0r0hj4YL+jSRNjnshkeH4GY+4s=", "path": "github.com/hailocab/go-hostpool", @@ -1146,6 +1152,24 @@ "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", "revisionTime": "2017-09-20T19:48:06Z" }, + { + "checksumSHA1": "Is7OvHxCEEkKpdQnW8olCxL0444=", + "path": "github.com/hashicorp/nomad/api/contexts", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": "2017-09-20T19:48:06Z" + }, + { + "checksumSHA1": "GpikwcF9oi5Rrs/58xDSfiMy/I8=", + "path": "github.com/hashicorp/nomad/helper", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": 
"2017-09-20T19:48:06Z" + }, + { + "checksumSHA1": "hrzGvgMsH9p6MKOu3zYS8fooL3g=", + "path": "github.com/hashicorp/nomad/nomad/structs", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": "2017-09-20T19:48:06Z" + }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", "path": "github.com/hashicorp/serf/coordinate", From 484401689bdcf6dfd501e10de6d20a8a9c6426db Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Mon, 2 Oct 2017 13:46:42 -0400 Subject: [PATCH 15/52] fixing dependencies --- vendor/github.com/hashicorp/nomad/api/jobs.go | 31 +- .../hashicorp/nomad/api/jobs_testing.go | 12 +- .../github.com/hashicorp/nomad/api/nodes.go | 2 +- .../hashicorp/nomad/api/operator.go | 2 +- .../github.com/hashicorp/nomad/api/tasks.go | 2 +- .../hashicorp/nomad/helper/funcs.go | 2 +- .../hashicorp/nomad/nomad/structs/bitmap.go | 78 - .../hashicorp/nomad/nomad/structs/diff.go | 1231 ---- .../hashicorp/nomad/nomad/structs/funcs.go | 310 - .../hashicorp/nomad/nomad/structs/network.go | 326 - .../nomad/nomad/structs/node_class.go | 94 - .../hashicorp/nomad/nomad/structs/operator.go | 49 - .../hashicorp/nomad/nomad/structs/structs.go | 5783 ----------------- .../nomad/nomad/structs/structs_codegen.go | 3 - vendor/vendor.json | 24 +- 15 files changed, 44 insertions(+), 7905 deletions(-) delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/diff.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/network.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/operator.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go index 4ec71af4a..e68bef1e7 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -9,7 +9,6 @@ import ( "github.com/gorhill/cronexpr" "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" ) const ( @@ -330,6 +329,20 @@ type UpdateStrategy struct { Canary *int `mapstructure:"canary"` } +// DefaultUpdateStrategy provides a baseline that can be used to upgrade +// jobs with the old policy or for populating field defaults. 
+func DefaultUpdateStrategy() *UpdateStrategy { + return &UpdateStrategy{ + Stagger: helper.TimeToPtr(30 * time.Second), + MaxParallel: helper.IntToPtr(1), + HealthCheck: helper.StringToPtr("checks"), + MinHealthyTime: helper.TimeToPtr(10 * time.Second), + HealthyDeadline: helper.TimeToPtr(5 * time.Minute), + AutoRevert: helper.BoolToPtr(false), + Canary: helper.IntToPtr(0), + } +} + func (u *UpdateStrategy) Copy() *UpdateStrategy { if u == nil { return nil @@ -403,34 +416,34 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) { } func (u *UpdateStrategy) Canonicalize() { - d := structs.DefaultUpdateStrategy + d := DefaultUpdateStrategy() if u.MaxParallel == nil { - u.MaxParallel = helper.IntToPtr(d.MaxParallel) + u.MaxParallel = d.MaxParallel } if u.Stagger == nil { - u.Stagger = helper.TimeToPtr(d.Stagger) + u.Stagger = d.Stagger } if u.HealthCheck == nil { - u.HealthCheck = helper.StringToPtr(d.HealthCheck) + u.HealthCheck = d.HealthCheck } if u.HealthyDeadline == nil { - u.HealthyDeadline = helper.TimeToPtr(d.HealthyDeadline) + u.HealthyDeadline = d.HealthyDeadline } if u.MinHealthyTime == nil { - u.MinHealthyTime = helper.TimeToPtr(d.MinHealthyTime) + u.MinHealthyTime = d.MinHealthyTime } if u.AutoRevert == nil { - u.AutoRevert = helper.BoolToPtr(d.AutoRevert) + u.AutoRevert = d.AutoRevert } if u.Canary == nil { - u.Canary = helper.IntToPtr(d.Canary) + u.Canary = d.Canary } } diff --git a/vendor/github.com/hashicorp/nomad/api/jobs_testing.go b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go index bed9ac474..1bd47496c 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs_testing.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go @@ -4,27 +4,27 @@ import ( "time" "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/helper/uuid" ) func MockJob() *Job { job := &Job{ Region: helper.StringToPtr("global"), - ID: helper.StringToPtr(structs.GenerateUUID()), + ID: helper.StringToPtr(uuid.Generate()), Name: helper.StringToPtr("my-job"), Type: helper.StringToPtr("service"), Priority: helper.IntToPtr(50), AllAtOnce: helper.BoolToPtr(false), Datacenters: []string{"dc1"}, Constraints: []*Constraint{ - &Constraint{ + { LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: helper.StringToPtr("web"), Count: helper.IntToPtr(10), EphemeralDisk: &EphemeralDisk{ @@ -37,7 +37,7 @@ func MockJob() *Job { Mode: helper.StringToPtr("delay"), }, Tasks: []*Task{ - &Task{ + { Name: "web", Driver: "exec", Config: map[string]interface{}{ @@ -72,7 +72,7 @@ func MockJob() *Job { CPU: helper.IntToPtr(500), MemoryMB: helper.IntToPtr(256), Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: helper.IntToPtr(50), DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}}, }, diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go index 50a159628..e1ef5e2aa 100644 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -22,7 +22,7 @@ func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { if err != nil { return nil, nil, err } - sort.Sort(NodeIndexSort(resp)) + sort.Sort(resp) return resp, qm, nil } diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go index a10648a29..a83d54cb3 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator.go +++ 
b/vendor/github.com/hashicorp/nomad/api/operator.go @@ -75,7 +75,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err // TODO (alexdadgar) Currently we made address a query parameter. Once // IDs are in place this will be DELETE /v1/operator/raft/peer/. - r.params.Set("address", string(address)) + r.params.Set("address", address) _, resp, err := requireOK(op.c.doRequest(r)) if err != nil { diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go index a3d10831e..3233c9963 100644 --- a/vendor/github.com/hashicorp/nomad/api/tasks.go +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -83,7 +83,7 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) { // failing health checks. type CheckRestart struct { Limit int `mapstructure:"limit"` - Grace *time.Duration `mapstructure:"grace_period"` + Grace *time.Duration `mapstructure:"grace"` IgnoreWarnings bool `mapstructure:"ignore_warnings"` } diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go index 0b0796059..19911941f 100644 --- a/vendor/github.com/hashicorp/nomad/helper/funcs.go +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -180,7 +180,7 @@ func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { } c := make(map[string]struct{}, l) - for k, _ := range m { + for k := range m { c[k] = struct{}{} } return c diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go deleted file mode 100644 index 63758a0be..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go +++ /dev/null @@ -1,78 +0,0 @@ -package structs - -import "fmt" - -// Bitmap is a simple uncompressed bitmap -type Bitmap []byte - -// NewBitmap returns a bitmap with up to size indexes -func NewBitmap(size uint) (Bitmap, error) { - if size == 0 { - return nil, fmt.Errorf("bitmap must be positive size") - } - if size&7 != 0 { - return nil, fmt.Errorf("bitmap must be byte aligned") - } - b := make([]byte, size>>3) - return Bitmap(b), nil -} - -// Copy returns a copy of the Bitmap -func (b Bitmap) Copy() (Bitmap, error) { - if b == nil { - return nil, fmt.Errorf("can't copy nil Bitmap") - } - - raw := make([]byte, len(b)) - copy(raw, b) - return Bitmap(raw), nil -} - -// Size returns the size of the bitmap -func (b Bitmap) Size() uint { - return uint(len(b) << 3) -} - -// Set is used to set the given index of the bitmap -func (b Bitmap) Set(idx uint) { - bucket := idx >> 3 - mask := byte(1 << (idx & 7)) - b[bucket] |= mask -} - -// Unset is used to unset the given index of the bitmap -func (b Bitmap) Unset(idx uint) { - bucket := idx >> 3 - // Mask should be all ones minus the idx position - offset := 1 << (idx & 7) - mask := byte(offset ^ 0xff) - b[bucket] &= mask -} - -// Check is used to check the given index of the bitmap -func (b Bitmap) Check(idx uint) bool { - bucket := idx >> 3 - mask := byte(1 << (idx & 7)) - return (b[bucket] & mask) != 0 -} - -// Clear is used to efficiently clear the bitmap -func (b Bitmap) Clear() { - for i := range b { - b[i] = 0 - } -} - -// IndexesInRange returns the indexes in which the values are either set or unset based -// on the passed parameter in the passed range -func (b Bitmap) IndexesInRange(set bool, from, to uint) []int { - var indexes []int - for i := from; i <= to && i < b.Size(); i++ { - c := b.Check(i) - if c && set || !c && !set { - indexes = append(indexes, int(i)) - } - } - - 
return indexes -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go deleted file mode 100644 index b8ced9180..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go +++ /dev/null @@ -1,1231 +0,0 @@ -package structs - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/nomad/helper/flatmap" - "github.com/mitchellh/hashstructure" -) - -// DiffType denotes the type of a diff object. -type DiffType string - -var ( - DiffTypeNone DiffType = "None" - DiffTypeAdded DiffType = "Added" - DiffTypeDeleted DiffType = "Deleted" - DiffTypeEdited DiffType = "Edited" -) - -func (d DiffType) Less(other DiffType) bool { - // Edited > Added > Deleted > None - // But we do a reverse sort - if d == other { - return false - } - - if d == DiffTypeEdited { - return true - } else if other == DiffTypeEdited { - return false - } else if d == DiffTypeAdded { - return true - } else if other == DiffTypeAdded { - return false - } else if d == DiffTypeDeleted { - return true - } else if other == DiffTypeDeleted { - return false - } - - return true -} - -// JobDiff contains the diff of two jobs. -type JobDiff struct { - Type DiffType - ID string - Fields []*FieldDiff - Objects []*ObjectDiff - TaskGroups []*TaskGroupDiff -} - -// Diff returns a diff of two jobs and a potential error if the Jobs are not -// diffable. If contextual diff is enabled, objects within the job will contain -// field information even if unchanged. -func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) { - // COMPAT: Remove "Update" in 0.7.0. Update pushed down to task groups - // in 0.6.0 - diff := &JobDiff{Type: DiffTypeNone} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"ID", "Status", "StatusDescription", "Version", "Stable", "CreateIndex", - "ModifyIndex", "JobModifyIndex", "Update", "SubmitTime"} - - if j == nil && other == nil { - return diff, nil - } else if j == nil { - j = &Job{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - diff.ID = other.ID - } else if other == nil { - other = &Job{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(j, filter, true) - diff.ID = j.ID - } else { - if j.ID != other.ID { - return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID) - } - - oldPrimitiveFlat = flatmap.Flatten(j, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - diff.ID = other.ID - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) - - // Datacenters diff - if setDiff := stringSetDiff(j.Datacenters, other.Datacenters, "Datacenters", contextual); setDiff != nil && setDiff.Type != DiffTypeNone { - diff.Objects = append(diff.Objects, setDiff) - } - - // Constraints diff - conDiff := primitiveObjectSetDiff( - interfaceSlice(j.Constraints), - interfaceSlice(other.Constraints), - []string{"str"}, - "Constraint", - contextual) - if conDiff != nil { - diff.Objects = append(diff.Objects, conDiff...) 
- } - - // Task groups diff - tgs, err := taskGroupDiffs(j.TaskGroups, other.TaskGroups, contextual) - if err != nil { - return nil, err - } - diff.TaskGroups = tgs - - // Periodic diff - if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil { - diff.Objects = append(diff.Objects, pDiff) - } - - // ParameterizedJob diff - if cDiff := parameterizedJobDiff(j.ParameterizedJob, other.ParameterizedJob, contextual); cDiff != nil { - diff.Objects = append(diff.Objects, cDiff) - } - - // Check to see if there is a diff. We don't use reflect because we are - // filtering quite a few fields that will change on each diff. - if diff.Type == DiffTypeNone { - for _, fd := range diff.Fields { - if fd.Type != DiffTypeNone { - diff.Type = DiffTypeEdited - break - } - } - } - - if diff.Type == DiffTypeNone { - for _, od := range diff.Objects { - if od.Type != DiffTypeNone { - diff.Type = DiffTypeEdited - break - } - } - } - - if diff.Type == DiffTypeNone { - for _, tg := range diff.TaskGroups { - if tg.Type != DiffTypeNone { - diff.Type = DiffTypeEdited - break - } - } - } - - return diff, nil -} - -func (j *JobDiff) GoString() string { - out := fmt.Sprintf("Job %q (%s):\n", j.ID, j.Type) - - for _, f := range j.Fields { - out += fmt.Sprintf("%#v\n", f) - } - - for _, o := range j.Objects { - out += fmt.Sprintf("%#v\n", o) - } - - for _, tg := range j.TaskGroups { - out += fmt.Sprintf("%#v\n", tg) - } - - return out -} - -// TaskGroupDiff contains the diff of two task groups. -type TaskGroupDiff struct { - Type DiffType - Name string - Fields []*FieldDiff - Objects []*ObjectDiff - Tasks []*TaskDiff - Updates map[string]uint64 -} - -// Diff returns a diff of two task groups. If contextual diff is enabled, -// objects' fields will be stored even if no diff occurred as long as one field -// changed. -func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, error) { - diff := &TaskGroupDiff{Type: DiffTypeNone} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"Name"} - - if tg == nil && other == nil { - return diff, nil - } else if tg == nil { - tg = &TaskGroup{} - diff.Type = DiffTypeAdded - diff.Name = other.Name - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } else if other == nil { - other = &TaskGroup{} - diff.Type = DiffTypeDeleted - diff.Name = tg.Name - oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) - } else { - if !reflect.DeepEqual(tg, other) { - diff.Type = DiffTypeEdited - } - if tg.Name != other.Name { - return nil, fmt.Errorf("can not diff task groups with different names: %q and %q", tg.Name, other.Name) - } - diff.Name = other.Name - oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) - - // Constraints diff - conDiff := primitiveObjectSetDiff( - interfaceSlice(tg.Constraints), - interfaceSlice(other.Constraints), - []string{"str"}, - "Constraint", - contextual) - if conDiff != nil { - diff.Objects = append(diff.Objects, conDiff...) 
- } - - // Restart policy diff - rDiff := primitiveObjectDiff(tg.RestartPolicy, other.RestartPolicy, nil, "RestartPolicy", contextual) - if rDiff != nil { - diff.Objects = append(diff.Objects, rDiff) - } - - // EphemeralDisk diff - diskDiff := primitiveObjectDiff(tg.EphemeralDisk, other.EphemeralDisk, nil, "EphemeralDisk", contextual) - if diskDiff != nil { - diff.Objects = append(diff.Objects, diskDiff) - } - - // Update diff - // COMPAT: Remove "Stagger" in 0.7.0. - if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil { - diff.Objects = append(diff.Objects, uDiff) - } - - // Tasks diff - tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual) - if err != nil { - return nil, err - } - diff.Tasks = tasks - - return diff, nil -} - -func (tg *TaskGroupDiff) GoString() string { - out := fmt.Sprintf("Group %q (%s):\n", tg.Name, tg.Type) - - if len(tg.Updates) != 0 { - out += "Updates {\n" - for update, count := range tg.Updates { - out += fmt.Sprintf("%d %s\n", count, update) - } - out += "}\n" - } - - for _, f := range tg.Fields { - out += fmt.Sprintf("%#v\n", f) - } - - for _, o := range tg.Objects { - out += fmt.Sprintf("%#v\n", o) - } - - for _, t := range tg.Tasks { - out += fmt.Sprintf("%#v\n", t) - } - - return out -} - -// TaskGroupDiffs diffs two sets of task groups. If contextual diff is enabled, -// objects' fields will be stored even if no diff occurred as long as one field -// changed. -func taskGroupDiffs(old, new []*TaskGroup, contextual bool) ([]*TaskGroupDiff, error) { - oldMap := make(map[string]*TaskGroup, len(old)) - newMap := make(map[string]*TaskGroup, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*TaskGroupDiff - for name, oldGroup := range oldMap { - // Diff the same, deleted and edited - diff, err := oldGroup.Diff(newMap[name], contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - - for name, newGroup := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - diff, err := old.Diff(newGroup, contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - } - - sort.Sort(TaskGroupDiffs(diffs)) - return diffs, nil -} - -// For sorting TaskGroupDiffs -type TaskGroupDiffs []*TaskGroupDiff - -func (tg TaskGroupDiffs) Len() int { return len(tg) } -func (tg TaskGroupDiffs) Swap(i, j int) { tg[i], tg[j] = tg[j], tg[i] } -func (tg TaskGroupDiffs) Less(i, j int) bool { return tg[i].Name < tg[j].Name } - -// TaskDiff contains the diff of two Tasks -type TaskDiff struct { - Type DiffType - Name string - Fields []*FieldDiff - Objects []*ObjectDiff - Annotations []string -} - -// Diff returns a diff of two tasks. If contextual diff is enabled, objects -// within the task will contain field information even if unchanged. 
-func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { - diff := &TaskDiff{Type: DiffTypeNone} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"Name", "Config"} - - if t == nil && other == nil { - return diff, nil - } else if t == nil { - t = &Task{} - diff.Type = DiffTypeAdded - diff.Name = other.Name - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } else if other == nil { - other = &Task{} - diff.Type = DiffTypeDeleted - diff.Name = t.Name - oldPrimitiveFlat = flatmap.Flatten(t, filter, true) - } else { - if !reflect.DeepEqual(t, other) { - diff.Type = DiffTypeEdited - } - if t.Name != other.Name { - return nil, fmt.Errorf("can not diff tasks with different names: %q and %q", t.Name, other.Name) - } - diff.Name = other.Name - oldPrimitiveFlat = flatmap.Flatten(t, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) - - // Constraints diff - conDiff := primitiveObjectSetDiff( - interfaceSlice(t.Constraints), - interfaceSlice(other.Constraints), - []string{"str"}, - "Constraint", - contextual) - if conDiff != nil { - diff.Objects = append(diff.Objects, conDiff...) - } - - // Config diff - if cDiff := configDiff(t.Config, other.Config, contextual); cDiff != nil { - diff.Objects = append(diff.Objects, cDiff) - } - - // Resources diff - if rDiff := t.Resources.Diff(other.Resources, contextual); rDiff != nil { - diff.Objects = append(diff.Objects, rDiff) - } - - // LogConfig diff - lDiff := primitiveObjectDiff(t.LogConfig, other.LogConfig, nil, "LogConfig", contextual) - if lDiff != nil { - diff.Objects = append(diff.Objects, lDiff) - } - - // Dispatch payload diff - dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual) - if dDiff != nil { - diff.Objects = append(diff.Objects, dDiff) - } - - // Artifacts diff - diffs := primitiveObjectSetDiff( - interfaceSlice(t.Artifacts), - interfaceSlice(other.Artifacts), - nil, - "Artifact", - contextual) - if diffs != nil { - diff.Objects = append(diff.Objects, diffs...) - } - - // Services diff - if sDiffs := serviceDiffs(t.Services, other.Services, contextual); sDiffs != nil { - diff.Objects = append(diff.Objects, sDiffs...) - } - - // Vault diff - vDiff := vaultDiff(t.Vault, other.Vault, contextual) - if vDiff != nil { - diff.Objects = append(diff.Objects, vDiff) - } - - // Template diff - tmplDiffs := primitiveObjectSetDiff( - interfaceSlice(t.Templates), - interfaceSlice(other.Templates), - nil, - "Template", - contextual) - if tmplDiffs != nil { - diff.Objects = append(diff.Objects, tmplDiffs...) - } - - return diff, nil -} - -func (t *TaskDiff) GoString() string { - var out string - if len(t.Annotations) == 0 { - out = fmt.Sprintf("Task %q (%s):\n", t.Name, t.Type) - } else { - out = fmt.Sprintf("Task %q (%s) (%s):\n", t.Name, t.Type, strings.Join(t.Annotations, ",")) - } - - for _, f := range t.Fields { - out += fmt.Sprintf("%#v\n", f) - } - - for _, o := range t.Objects { - out += fmt.Sprintf("%#v\n", o) - } - - return out -} - -// taskDiffs diffs a set of tasks. If contextual diff is enabled, unchanged -// fields within objects nested in the tasks will be returned. 
-func taskDiffs(old, new []*Task, contextual bool) ([]*TaskDiff, error) { - oldMap := make(map[string]*Task, len(old)) - newMap := make(map[string]*Task, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*TaskDiff - for name, oldGroup := range oldMap { - // Diff the same, deleted and edited - diff, err := oldGroup.Diff(newMap[name], contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - - for name, newGroup := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - diff, err := old.Diff(newGroup, contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - } - - sort.Sort(TaskDiffs(diffs)) - return diffs, nil -} - -// For sorting TaskDiffs -type TaskDiffs []*TaskDiff - -func (t TaskDiffs) Len() int { return len(t) } -func (t TaskDiffs) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t TaskDiffs) Less(i, j int) bool { return t[i].Name < t[j].Name } - -// serviceDiff returns the diff of two service objects. If contextual diff is -// enabled, all fields will be returned, even if no diff occurred. -func serviceDiff(old, new *Service, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Service"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &Service{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &Service{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Checks diffs - if cDiffs := serviceCheckDiffs(old.Checks, new.Checks, contextual); cDiffs != nil { - diff.Objects = append(diff.Objects, cDiffs...) - } - - return diff -} - -// serviceDiffs diffs a set of services. If contextual diff is enabled, unchanged -// fields within objects nested in the tasks will be returned. -func serviceDiffs(old, new []*Service, contextual bool) []*ObjectDiff { - oldMap := make(map[string]*Service, len(old)) - newMap := make(map[string]*Service, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*ObjectDiff - for name, oldService := range oldMap { - // Diff the same, deleted and edited - if diff := serviceDiff(oldService, newMap[name], contextual); diff != nil { - diffs = append(diffs, diff) - } - } - - for name, newService := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - if diff := serviceDiff(old, newService, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs -} - -// serviceCheckDiff returns the diff of two service check objects. If contextual -// diff is enabled, all fields will be returned, even if no diff occurred. 
-func serviceCheckDiff(old, new *ServiceCheck, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Check"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &ServiceCheck{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &ServiceCheck{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Diff Header - if headerDiff := checkHeaderDiff(old.Header, new.Header, contextual); headerDiff != nil { - diff.Objects = append(diff.Objects, headerDiff) - } - - return diff -} - -// checkHeaderDiff returns the diff of two service check header objects. If -// contextual diff is enabled, all fields will be returned, even if no diff -// occurred. -func checkHeaderDiff(old, new map[string][]string, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Header"} - if reflect.DeepEqual(old, new) { - return nil - } else if len(old) == 0 { - diff.Type = DiffTypeAdded - } else if len(new) == 0 { - diff.Type = DiffTypeDeleted - } else { - diff.Type = DiffTypeEdited - } - oldFlat := flatmap.Flatten(old, nil, false) - newFlat := flatmap.Flatten(new, nil, false) - diff.Fields = fieldDiffs(oldFlat, newFlat, contextual) - return diff -} - -// serviceCheckDiffs diffs a set of service checks. If contextual diff is -// enabled, unchanged fields within objects nested in the tasks will be -// returned. -func serviceCheckDiffs(old, new []*ServiceCheck, contextual bool) []*ObjectDiff { - oldMap := make(map[string]*ServiceCheck, len(old)) - newMap := make(map[string]*ServiceCheck, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*ObjectDiff - for name, oldCheck := range oldMap { - // Diff the same, deleted and edited - if diff := serviceCheckDiff(oldCheck, newMap[name], contextual); diff != nil { - diffs = append(diffs, diff) - } - } - - for name, newCheck := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - if diff := serviceCheckDiff(old, newCheck, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs -} - -// vaultDiff returns the diff of two vault objects. If contextual diff is -// enabled, all fields will be returned, even if no diff occurred. -func vaultDiff(old, new *Vault, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Vault"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &Vault{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &Vault{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. 
- diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Policies diffs - if setDiff := stringSetDiff(old.Policies, new.Policies, "Policies", contextual); setDiff != nil { - diff.Objects = append(diff.Objects, setDiff) - } - - return diff -} - -// parameterizedJobDiff returns the diff of two parameterized job objects. If -// contextual diff is enabled, all fields will be returned, even if no diff -// occurred. -func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "ParameterizedJob"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &ParameterizedJobConfig{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &ParameterizedJobConfig{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Meta diffs - if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil { - diff.Objects = append(diff.Objects, optionalDiff) - } - - if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil { - diff.Objects = append(diff.Objects, requiredDiff) - } - - return diff -} - -// Diff returns a diff of two resource objects. If contextual diff is enabled, -// non-changed fields will still be returned. -func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Resources"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(r, other) { - return nil - } else if r == nil { - r = &Resources{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(other, nil, true) - } else if other == nil { - other = &Resources{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(r, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(r, nil, true) - newPrimitiveFlat = flatmap.Flatten(other, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Network Resources diff - if nDiffs := networkResourceDiffs(r.Networks, other.Networks, contextual); nDiffs != nil { - diff.Objects = append(diff.Objects, nDiffs...) - } - - return diff -} - -// Diff returns a diff of two network resources. If contextual diff is enabled, -// non-changed fields will still be returned. 
-func (r *NetworkResource) Diff(other *NetworkResource, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Network"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"Device", "CIDR", "IP"} - - if reflect.DeepEqual(r, other) { - return nil - } else if r == nil { - r = &NetworkResource{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } else if other == nil { - other = &NetworkResource{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(r, filter, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(r, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Port diffs - resPorts := portDiffs(r.ReservedPorts, other.ReservedPorts, false, contextual) - dynPorts := portDiffs(r.DynamicPorts, other.DynamicPorts, true, contextual) - if resPorts != nil { - diff.Objects = append(diff.Objects, resPorts...) - } - if dynPorts != nil { - diff.Objects = append(diff.Objects, dynPorts...) - } - - return diff -} - -// networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled, -// non-changed fields will still be returned. -func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff { - makeSet := func(objects []*NetworkResource) map[string]*NetworkResource { - objMap := make(map[string]*NetworkResource, len(objects)) - for _, obj := range objects { - hash, err := hashstructure.Hash(obj, nil) - if err != nil { - panic(err) - } - objMap[fmt.Sprintf("%d", hash)] = obj - } - - return objMap - } - - oldSet := makeSet(old) - newSet := makeSet(new) - - var diffs []*ObjectDiff - for k, oldV := range oldSet { - if newV, ok := newSet[k]; !ok { - if diff := oldV.Diff(newV, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - for k, newV := range newSet { - if oldV, ok := oldSet[k]; !ok { - if diff := oldV.Diff(newV, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs - -} - -// portDiffs returns the diff of two sets of ports. The dynamic flag marks the -// set of ports as being Dynamic ports versus Static ports. If contextual diff is enabled, -// non-changed fields will still be returned. 
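
Note that networkResourceDiffs keys its sets by a structural hash rather than by a name, so networks have no stable identity across edits: a changed network renders as one deleted and one added entry, never as an in-place edit. A small demonstration using the same hashstructure library the code above imports (the stripped-down NetworkResource stub is illustrative):

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// A stripped-down stand-in for the vendored NetworkResource.
type NetworkResource struct {
	Device string
	MBits  int
}

func main() {
	a := NetworkResource{Device: "eth0", MBits: 50}
	b := NetworkResource{Device: "eth0", MBits: 100}

	ha, _ := hashstructure.Hash(a, nil)
	hb, _ := hashstructure.Hash(b, nil)

	// Different contents hash to different set keys, so an edited network
	// shows up in the diff as a delete plus an add.
	fmt.Println(ha == hb) // false
}
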
-func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { - makeSet := func(ports []Port) map[string]Port { - portMap := make(map[string]Port, len(ports)) - for _, port := range ports { - portMap[port.Label] = port - } - - return portMap - } - - oldPorts := makeSet(old) - newPorts := makeSet(new) - - var filter []string - name := "Static Port" - if dynamic { - filter = []string{"Value"} - name = "Dynamic Port" - } - - var diffs []*ObjectDiff - for portLabel, oldPort := range oldPorts { - // Diff the same, deleted and edited - if newPort, ok := newPorts[portLabel]; ok { - diff := primitiveObjectDiff(oldPort, newPort, filter, name, contextual) - if diff != nil { - diffs = append(diffs, diff) - } - } else { - diff := primitiveObjectDiff(oldPort, nil, filter, name, contextual) - if diff != nil { - diffs = append(diffs, diff) - } - } - } - for label, newPort := range newPorts { - // Diff the added - if _, ok := oldPorts[label]; !ok { - diff := primitiveObjectDiff(nil, newPort, filter, name, contextual) - if diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs - -} - -// configDiff returns the diff of two Task Config objects. If contextual diff is -// enabled, all fields will be returned, even if no diff occurred. -func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Config"} - if reflect.DeepEqual(old, new) { - return nil - } else if len(old) == 0 { - diff.Type = DiffTypeAdded - } else if len(new) == 0 { - diff.Type = DiffTypeDeleted - } else { - diff.Type = DiffTypeEdited - } - - // Diff the primitive fields. - oldPrimitiveFlat := flatmap.Flatten(old, nil, false) - newPrimitiveFlat := flatmap.Flatten(new, nil, false) - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - return diff -} - -// ObjectDiff contains the diff of two generic objects. 
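
configDiff (like the other helpers above) reduces structured values to a flat map of leaf keys before comparing, so a change buried in a nested driver config still surfaces as an ordinary field diff. A rough standalone illustration of the flattening idea; flattenStrings and its dotted key syntax are simplifications, not the vendored flatmap package:

package main

import "fmt"

// flattenStrings loosely imitates flattening a nested config into leaf keys.
func flattenStrings(prefix string, in map[string]interface{}, out map[string]string) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "." + k
		}
		if m, ok := v.(map[string]interface{}); ok {
			flattenStrings(key, m, out)
			continue
		}
		out[key] = fmt.Sprintf("%v", v)
	}
}

func main() {
	old := map[string]interface{}{"image": "redis:3.2", "port_map": map[string]interface{}{"db": 6379}}
	new := map[string]interface{}{"image": "redis:4.0", "port_map": map[string]interface{}{"db": 6379}}

	oldFlat := map[string]string{}
	newFlat := map[string]string{}
	flattenStrings("", old, oldFlat)
	flattenStrings("", new, newFlat)

	// Comparing leaf keys yields a single edited field: image 3.2 => 4.0.
	for k, ov := range oldFlat {
		if nv := newFlat[k]; nv != ov {
			fmt.Printf("%s: %q => %q\n", k, ov, nv)
		}
	}
}
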
-type ObjectDiff struct { - Type DiffType - Name string - Fields []*FieldDiff - Objects []*ObjectDiff -} - -func (o *ObjectDiff) GoString() string { - out := fmt.Sprintf("\n%q (%s) {\n", o.Name, o.Type) - for _, f := range o.Fields { - out += fmt.Sprintf("%#v\n", f) - } - for _, o := range o.Objects { - out += fmt.Sprintf("%#v\n", o) - } - out += "}" - return out -} - -func (o *ObjectDiff) Less(other *ObjectDiff) bool { - if reflect.DeepEqual(o, other) { - return false - } else if other == nil { - return false - } else if o == nil { - return true - } - - if o.Name != other.Name { - return o.Name < other.Name - } - - if o.Type != other.Type { - return o.Type.Less(other.Type) - } - - if lO, lOther := len(o.Fields), len(other.Fields); lO != lOther { - return lO < lOther - } - - if lO, lOther := len(o.Objects), len(other.Objects); lO != lOther { - return lO < lOther - } - - // Check each field - sort.Sort(FieldDiffs(o.Fields)) - sort.Sort(FieldDiffs(other.Fields)) - - for i, oV := range o.Fields { - if oV.Less(other.Fields[i]) { - return true - } - } - - // Check each object - sort.Sort(ObjectDiffs(o.Objects)) - sort.Sort(ObjectDiffs(other.Objects)) - for i, oV := range o.Objects { - if oV.Less(other.Objects[i]) { - return true - } - } - - return false -} - -// For sorting ObjectDiffs -type ObjectDiffs []*ObjectDiff - -func (o ObjectDiffs) Len() int { return len(o) } -func (o ObjectDiffs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o ObjectDiffs) Less(i, j int) bool { return o[i].Less(o[j]) } - -type FieldDiff struct { - Type DiffType - Name string - Old, New string - Annotations []string -} - -// fieldDiff returns a FieldDiff if old and new are different otherwise, it -// returns nil. If contextual diff is enabled, even non-changed fields will be -// returned. -func fieldDiff(old, new, name string, contextual bool) *FieldDiff { - diff := &FieldDiff{Name: name, Type: DiffTypeNone} - if old == new { - if !contextual { - return nil - } - diff.Old, diff.New = old, new - return diff - } - - if old == "" { - diff.Type = DiffTypeAdded - diff.New = new - } else if new == "" { - diff.Type = DiffTypeDeleted - diff.Old = old - } else { - diff.Type = DiffTypeEdited - diff.Old = old - diff.New = new - } - return diff -} - -func (f *FieldDiff) GoString() string { - out := fmt.Sprintf("%q (%s): %q => %q", f.Name, f.Type, f.Old, f.New) - if len(f.Annotations) != 0 { - out += fmt.Sprintf(" (%s)", strings.Join(f.Annotations, ", ")) - } - - return out -} - -func (f *FieldDiff) Less(other *FieldDiff) bool { - if reflect.DeepEqual(f, other) { - return false - } else if other == nil { - return false - } else if f == nil { - return true - } - - if f.Name != other.Name { - return f.Name < other.Name - } else if f.Old != other.Old { - return f.Old < other.Old - } - - return f.New < other.New -} - -// For sorting FieldDiffs -type FieldDiffs []*FieldDiff - -func (f FieldDiffs) Len() int { return len(f) } -func (f FieldDiffs) Swap(i, j int) { f[i], f[j] = f[j], f[i] } -func (f FieldDiffs) Less(i, j int) bool { return f[i].Less(f[j]) } - -// fieldDiffs takes a map of field names to their values and returns a set of -// field diffs. If contextual diff is enabled, even non-changed fields will be -// returned. 
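
The DiffType decision inside fieldDiff is the backbone of everything above. Restated as a tiny self-contained function (classify is our name for it):

package main

import "fmt"

type DiffType string

const (
	DiffTypeNone    DiffType = "None"
	DiffTypeAdded   DiffType = "Added"
	DiffTypeDeleted DiffType = "Deleted"
	DiffTypeEdited  DiffType = "Edited"
)

// classify restates fieldDiff's rule: an empty old value means Added, an
// empty new value means Deleted, and two differing non-empty values mean
// Edited.
func classify(old, new string) DiffType {
	switch {
	case old == new:
		return DiffTypeNone
	case old == "":
		return DiffTypeAdded
	case new == "":
		return DiffTypeDeleted
	default:
		return DiffTypeEdited
	}
}

func main() {
	fmt.Println(classify("", "256"))    // Added
	fmt.Println(classify("256", ""))    // Deleted
	fmt.Println(classify("256", "512")) // Edited
	fmt.Println(classify("512", "512")) // None
}

One consequence of the empty-string convention: a field that legitimately changes to the empty string reads as Deleted rather than Edited.
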
-func fieldDiffs(old, new map[string]string, contextual bool) []*FieldDiff { - var diffs []*FieldDiff - visited := make(map[string]struct{}) - for k, oldV := range old { - visited[k] = struct{}{} - newV := new[k] - if diff := fieldDiff(oldV, newV, k, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - - for k, newV := range new { - if _, ok := visited[k]; !ok { - if diff := fieldDiff("", newV, k, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(FieldDiffs(diffs)) - return diffs -} - -// stringSetDiff diffs two sets of strings with the given name. -func stringSetDiff(old, new []string, name string, contextual bool) *ObjectDiff { - oldMap := make(map[string]struct{}, len(old)) - newMap := make(map[string]struct{}, len(new)) - for _, o := range old { - oldMap[o] = struct{}{} - } - for _, n := range new { - newMap[n] = struct{}{} - } - if reflect.DeepEqual(oldMap, newMap) && !contextual { - return nil - } - - diff := &ObjectDiff{Name: name} - var added, removed bool - for k := range oldMap { - if _, ok := newMap[k]; !ok { - diff.Fields = append(diff.Fields, fieldDiff(k, "", name, contextual)) - removed = true - } else if contextual { - diff.Fields = append(diff.Fields, fieldDiff(k, k, name, contextual)) - } - } - - for k := range newMap { - if _, ok := oldMap[k]; !ok { - diff.Fields = append(diff.Fields, fieldDiff("", k, name, contextual)) - added = true - } - } - - sort.Sort(FieldDiffs(diff.Fields)) - - // Determine the type - if added && removed { - diff.Type = DiffTypeEdited - } else if added { - diff.Type = DiffTypeAdded - } else if removed { - diff.Type = DiffTypeDeleted - } else { - // Diff of an empty set - if len(diff.Fields) == 0 { - return nil - } - - diff.Type = DiffTypeNone - } - - return diff -} - -// primitiveObjectDiff returns a diff of the passed objects' primitive fields. -// The filter field can be used to exclude fields from the diff. The name is the -// name of the objects. If contextual is set, non-changed fields will also be -// stored in the object diff. -func primitiveObjectDiff(old, new interface{}, filter []string, name string, contextual bool) *ObjectDiff { - oldPrimitiveFlat := flatmap.Flatten(old, filter, true) - newPrimitiveFlat := flatmap.Flatten(new, filter, true) - delete(oldPrimitiveFlat, "") - delete(newPrimitiveFlat, "") - - diff := &ObjectDiff{Name: name} - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - var added, deleted, edited bool - for _, f := range diff.Fields { - switch f.Type { - case DiffTypeEdited: - edited = true - break - case DiffTypeDeleted: - deleted = true - case DiffTypeAdded: - added = true - } - } - - if edited || added && deleted { - diff.Type = DiffTypeEdited - } else if added { - diff.Type = DiffTypeAdded - } else if deleted { - diff.Type = DiffTypeDeleted - } else { - return nil - } - - return diff -} - -// primitiveObjectSetDiff does a set difference of the old and new sets. The -// filter parameter can be used to filter a set of primitive fields in the -// passed structs. The name corresponds to the name of the passed objects. If -// contextual diff is enabled, objects' primtive fields will be returned even if -// no diff exists. 
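
To make stringSetDiff concrete: diffing Vault policies ["read", "write"] against ["write", "admin"] produces a Deleted field for "read", an Added field for "admin", and, because both kinds occur, DiffTypeEdited for the object as a whole. The same membership walk in miniature (setDiff is our name for it):

package main

import "fmt"

// setDiff repeats stringSetDiff's membership walk without the diff plumbing.
func setDiff(old, new []string) (added, removed []string) {
	oldSet := make(map[string]struct{}, len(old))
	newSet := make(map[string]struct{}, len(new))
	for _, o := range old {
		oldSet[o] = struct{}{}
	}
	for _, n := range new {
		newSet[n] = struct{}{}
	}
	for k := range oldSet {
		if _, ok := newSet[k]; !ok {
			removed = append(removed, k) // only in old: a Deleted field
		}
	}
	for k := range newSet {
		if _, ok := oldSet[k]; !ok {
			added = append(added, k) // only in new: an Added field
		}
	}
	return added, removed
}

func main() {
	added, removed := setDiff([]string{"read", "write"}, []string{"write", "admin"})
	// Both kinds present, so stringSetDiff would mark the object Edited.
	fmt.Println(added, removed) // [admin] [read]
}
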
-func primitiveObjectSetDiff(old, new []interface{}, filter []string, name string, contextual bool) []*ObjectDiff { - makeSet := func(objects []interface{}) map[string]interface{} { - objMap := make(map[string]interface{}, len(objects)) - for _, obj := range objects { - hash, err := hashstructure.Hash(obj, nil) - if err != nil { - panic(err) - } - objMap[fmt.Sprintf("%d", hash)] = obj - } - - return objMap - } - - oldSet := makeSet(old) - newSet := makeSet(new) - - var diffs []*ObjectDiff - for k, v := range oldSet { - // Deleted - if _, ok := newSet[k]; !ok { - diffs = append(diffs, primitiveObjectDiff(v, nil, filter, name, contextual)) - } - } - for k, v := range newSet { - // Added - if _, ok := oldSet[k]; !ok { - diffs = append(diffs, primitiveObjectDiff(nil, v, filter, name, contextual)) - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs -} - -// interfaceSlice is a helper method that takes a slice of typed elements and -// returns a slice of interface. This method will panic if given a non-slice -// input. -func interfaceSlice(slice interface{}) []interface{} { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - panic("InterfaceSlice() given a non-slice type") - } - - ret := make([]interface{}, s.Len()) - - for i := 0; i < s.Len(); i++ { - ret[i] = s.Index(i).Interface() - } - - return ret -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go deleted file mode 100644 index 751befd65..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go +++ /dev/null @@ -1,310 +0,0 @@ -package structs - -import ( - crand "crypto/rand" - "encoding/binary" - "fmt" - "math" - "sort" - "strings" - - "golang.org/x/crypto/blake2b" - - multierror "github.com/hashicorp/go-multierror" - lru "github.com/hashicorp/golang-lru" - "github.com/hashicorp/nomad/acl" -) - -// MergeMultierrorWarnings takes job warnings and canonicalize warnings and -// merges them into a returnable string. Both the errors may be nil. 
-func MergeMultierrorWarnings(warnings ...error) string { - var warningMsg multierror.Error - for _, warn := range warnings { - if warn != nil { - multierror.Append(&warningMsg, warn) - } - } - - if len(warningMsg.Errors) == 0 { - return "" - } - - // Set the formatter - warningMsg.ErrorFormat = warningsFormatter - return warningMsg.Error() -} - -// warningsFormatter is used to format job warnings -func warningsFormatter(es []error) string { - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d warning(s):\n\n%s", - len(es), strings.Join(points, "\n")) -} - -// RemoveAllocs is used to remove any allocs with the given IDs -// from the list of allocations -func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation { - // Convert remove into a set - removeSet := make(map[string]struct{}) - for _, remove := range remove { - removeSet[remove.ID] = struct{}{} - } - - n := len(alloc) - for i := 0; i < n; i++ { - if _, ok := removeSet[alloc[i].ID]; ok { - alloc[i], alloc[n-1] = alloc[n-1], nil - i-- - n-- - } - } - - alloc = alloc[:n] - return alloc -} - -// FilterTerminalAllocs filters out all allocations in a terminal state and -// returns the latest terminal allocations -func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) { - terminalAllocsByName := make(map[string]*Allocation) - n := len(allocs) - for i := 0; i < n; i++ { - if allocs[i].TerminalStatus() { - - // Add the allocation to the terminal allocs map if it's not already - // added or has a higher create index than the one which is - // currently present. - alloc, ok := terminalAllocsByName[allocs[i].Name] - if !ok || alloc.CreateIndex < allocs[i].CreateIndex { - terminalAllocsByName[allocs[i].Name] = allocs[i] - } - - // Remove the allocation - allocs[i], allocs[n-1] = allocs[n-1], nil - i-- - n-- - } - } - return allocs[:n], terminalAllocsByName -} - -// AllocsFit checks if a given set of allocations will fit on a node. -// The netIdx can optionally be provided if its already been computed. -// If the netIdx is provided, it is assumed that the client has already -// ensured there are no collisions. -func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) { - // Compute the utilization from zero - used := new(Resources) - - // Add the reserved resources of the node - if node.Reserved != nil { - if err := used.Add(node.Reserved); err != nil { - return false, "", nil, err - } - } - - // For each alloc, add the resources - for _, alloc := range allocs { - if alloc.Resources != nil { - if err := used.Add(alloc.Resources); err != nil { - return false, "", nil, err - } - } else if alloc.TaskResources != nil { - - // Adding the shared resource asks for the allocation to the used - // resources - if err := used.Add(alloc.SharedResources); err != nil { - return false, "", nil, err - } - // Allocations within the plan have the combined resources stripped - // to save space, so sum up the individual task resources. 
- for _, taskResource := range alloc.TaskResources { - if err := used.Add(taskResource); err != nil { - return false, "", nil, err - } - } - } else { - return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID) - } - } - - // Check that the node resources are a super set of those - // that are being allocated - if superset, dimension := node.Resources.Superset(used); !superset { - return false, dimension, used, nil - } - - // Create the network index if missing - if netIdx == nil { - netIdx = NewNetworkIndex() - defer netIdx.Release() - if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) { - return false, "reserved port collision", used, nil - } - } - - // Check if the network is overcommitted - if netIdx.Overcommitted() { - return false, "bandwidth exceeded", used, nil - } - - // Allocations fit! - return true, "", used, nil -} - -// ScoreFit is used to score the fit based on the Google work published here: -// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt -// This is equivalent to their BestFit v3 -func ScoreFit(node *Node, util *Resources) float64 { - // Determine the node availability - nodeCpu := float64(node.Resources.CPU) - if node.Reserved != nil { - nodeCpu -= float64(node.Reserved.CPU) - } - nodeMem := float64(node.Resources.MemoryMB) - if node.Reserved != nil { - nodeMem -= float64(node.Reserved.MemoryMB) - } - - // Compute the free percentage - freePctCpu := 1 - (float64(util.CPU) / nodeCpu) - freePctRam := 1 - (float64(util.MemoryMB) / nodeMem) - - // Total will be "maximized" the smaller the value is. - // At 100% utilization, the total is 2, while at 0% util it is 20. - total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam) - - // Invert so that the "maximized" total represents a high-value - // score. Because the floor is 20, we simply use that as an anchor. - // This means at a perfect fit, we return 18 as the score. - score := 20.0 - total - - // Bound the score, just in case - // If the score is over 18, that means we've overfit the node. - if score > 18.0 { - score = 18.0 - } else if score < 0 { - score = 0 - } - return score -} - -// GenerateUUID is used to generate a random UUID -func GenerateUUID() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -func CopySliceConstraints(s []*Constraint) []*Constraint { - l := len(s) - if l == 0 { - return nil - } - - c := make([]*Constraint, l) - for i, v := range s { - c[i] = v.Copy() - } - return c -} - -// VaultPoliciesSet takes the structure returned by VaultPolicies and returns -// the set of required policies -func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { - set := make(map[string]struct{}) - - for _, tgp := range policies { - for _, tp := range tgp { - for _, p := range tp.Policies { - set[p] = struct{}{} - } - } - } - - flattened := make([]string, 0, len(set)) - for p := range set { - flattened = append(flattened, p) - } - return flattened -} - -// DenormalizeAllocationJobs is used to attach a job to all allocations that are -// non-terminal and do not have a job already. This is useful in cases where the -// job is normalized. 
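
The ScoreFit comment above ("the total is 2 at 100% utilization, 20 at 0%") is easiest to verify with numbers. A standalone restatement of the formula for a node with no reserved resources; the capacities below are illustrative:

package main

import (
	"fmt"
	"math"
)

// scoreFit restates ScoreFit's bin-packing math for a node with no
// reserved resources.
func scoreFit(nodeCPU, nodeMem, usedCPU, usedMem float64) float64 {
	freePctCPU := 1 - usedCPU/nodeCPU
	freePctMem := 1 - usedMem/nodeMem
	total := math.Pow(10, freePctCPU) + math.Pow(10, freePctMem)
	score := 20.0 - total
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}

func main() {
	fmt.Printf("%.2f\n", scoreFit(4000, 8192, 4000, 8192)) // full node: 18.00
	fmt.Printf("%.2f\n", scoreFit(4000, 8192, 2000, 4096)) // half full: 13.68
	fmt.Printf("%.2f\n", scoreFit(4000, 8192, 0, 0))       // empty node: 0.00
}

Higher scores go to fuller nodes, which is what makes this a bin-packing (BestFit) heuristic rather than a spreading one.
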
-func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) { - if job != nil { - for _, alloc := range allocs { - if alloc.Job == nil && !alloc.TerminalStatus() { - alloc.Job = job - } - } - } -} - -// AllocName returns the name of the allocation given the input. -func AllocName(job, group string, idx uint) string { - return fmt.Sprintf("%s.%s[%d]", job, group, idx) -} - -// ACLPolicyListHash returns a consistent hash for a set of policies. -func ACLPolicyListHash(policies []*ACLPolicy) string { - cacheKeyHash, err := blake2b.New256(nil) - if err != nil { - panic(err) - } - for _, policy := range policies { - cacheKeyHash.Write([]byte(policy.Name)) - binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) - } - cacheKey := string(cacheKeyHash.Sum(nil)) - return cacheKey -} - -// CompileACLObject compiles a set of ACL policies into an ACL object with a cache -func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) { - // Sort the policies to ensure consistent ordering - sort.Slice(policies, func(i, j int) bool { - return policies[i].Name < policies[j].Name - }) - - // Determine the cache key - cacheKey := ACLPolicyListHash(policies) - aclRaw, ok := cache.Get(cacheKey) - if ok { - return aclRaw.(*acl.ACL), nil - } - - // Parse the policies - parsed := make([]*acl.Policy, 0, len(policies)) - for _, policy := range policies { - p, err := acl.Parse(policy.Rules) - if err != nil { - return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err) - } - parsed = append(parsed, p) - } - - // Create the ACL object - aclObj, err := acl.NewACL(false, parsed) - if err != nil { - return nil, fmt.Errorf("failed to construct ACL: %v", err) - } - - // Update the cache - cache.Add(cacheKey, aclObj) - return aclObj, nil -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go deleted file mode 100644 index 3f0ebff4f..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go +++ /dev/null @@ -1,326 +0,0 @@ -package structs - -import ( - "fmt" - "math/rand" - "net" - "sync" -) - -const ( - // MinDynamicPort is the smallest dynamic port generated - MinDynamicPort = 20000 - - // MaxDynamicPort is the largest dynamic port generated - MaxDynamicPort = 32000 - - // maxRandPortAttempts is the maximum number of attempt - // to assign a random port - maxRandPortAttempts = 20 - - // maxValidPort is the max valid port number - maxValidPort = 65536 -) - -var ( - // bitmapPool is used to pool the bitmaps used for port collision - // checking. They are fairly large (8K) so we can re-use them to - // avoid GC pressure. Care should be taken to call Clear() on any - // bitmap coming from the pool. 
- bitmapPool = new(sync.Pool) -) - -// NetworkIndex is used to index the available network resources -// and the used network resources on a machine given allocations -type NetworkIndex struct { - AvailNetworks []*NetworkResource // List of available networks - AvailBandwidth map[string]int // Bandwidth by device - UsedPorts map[string]Bitmap // Ports by IP - UsedBandwidth map[string]int // Bandwidth by device -} - -// NewNetworkIndex is used to construct a new network index -func NewNetworkIndex() *NetworkIndex { - return &NetworkIndex{ - AvailBandwidth: make(map[string]int), - UsedPorts: make(map[string]Bitmap), - UsedBandwidth: make(map[string]int), - } -} - -// Release is called when the network index is no longer needed -// to attempt to re-use some of the memory it has allocated -func (idx *NetworkIndex) Release() { - for _, b := range idx.UsedPorts { - bitmapPool.Put(b) - } -} - -// Overcommitted checks if the network is overcommitted -func (idx *NetworkIndex) Overcommitted() bool { - for device, used := range idx.UsedBandwidth { - avail := idx.AvailBandwidth[device] - if used > avail { - return true - } - } - return false -} - -// SetNode is used to setup the available network resources. Returns -// true if there is a collision -func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { - // Add the available CIDR blocks - for _, n := range node.Resources.Networks { - if n.Device != "" { - idx.AvailNetworks = append(idx.AvailNetworks, n) - idx.AvailBandwidth[n.Device] = n.MBits - } - } - - // Add the reserved resources - if r := node.Reserved; r != nil { - for _, n := range r.Networks { - if idx.AddReserved(n) { - collide = true - } - } - } - return -} - -// AddAllocs is used to add the used network resources. Returns -// true if there is a collision -func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) { - for _, alloc := range allocs { - for _, task := range alloc.TaskResources { - if len(task.Networks) == 0 { - continue - } - n := task.Networks[0] - if idx.AddReserved(n) { - collide = true - } - } - } - return -} - -// AddReserved is used to add a reserved network usage, returns true -// if there is a port collision -func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) { - // Add the port usage - used := idx.UsedPorts[n.IP] - if used == nil { - // Try to get a bitmap from the pool, else create - raw := bitmapPool.Get() - if raw != nil { - used = raw.(Bitmap) - used.Clear() - } else { - used, _ = NewBitmap(maxValidPort) - } - idx.UsedPorts[n.IP] = used - } - - for _, ports := range [][]Port{n.ReservedPorts, n.DynamicPorts} { - for _, port := range ports { - // Guard against invalid port - if port.Value < 0 || port.Value >= maxValidPort { - return true - } - if used.Check(uint(port.Value)) { - collide = true - } else { - used.Set(uint(port.Value)) - } - } - } - - // Add the bandwidth - idx.UsedBandwidth[n.Device] += n.MBits - return -} - -// yieldIP is used to iteratively invoke the callback with -// an available IP -func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) { - inc := func(ip net.IP) { - for j := len(ip) - 1; j >= 0; j-- { - ip[j]++ - if ip[j] > 0 { - break - } - } - } - - for _, n := range idx.AvailNetworks { - ip, ipnet, err := net.ParseCIDR(n.CIDR) - if err != nil { - continue - } - for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { - if cb(n, ip) { - return - } - } - } -} - -// AssignNetwork is used to assign network resources given an ask. 
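
Putting the index together: callers build it per node, charge existing usage against it, then ask. A rough sketch of that cycle against the upstream package (error and collision handling elided; field values illustrative):

package sketch

import structs "github.com/hashicorp/nomad/nomad/structs"

// assignExample shows the build/charge/ask/release cycle around NetworkIndex.
func assignExample(node *structs.Node, existing []*structs.Allocation) (*structs.NetworkResource, error) {
	idx := structs.NewNetworkIndex()
	defer idx.Release() // hand the pooled port bitmaps back

	// Charge the node's reserved resources, then its current allocations;
	// a true return value signals a reserved-port collision.
	idx.SetNode(node)
	idx.AddAllocs(existing)

	// Ask for 50 MBits, one static port and one dynamic port.
	return idx.AssignNetwork(&structs.NetworkResource{
		MBits:         50,
		ReservedPorts: []structs.Port{{Label: "http", Value: 8080}},
		DynamicPorts:  []structs.Port{{Label: "rpc"}},
	})
}
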
-// If the ask cannot be satisfied, returns nil -func (idx *NetworkIndex) AssignNetwork(ask *NetworkResource) (out *NetworkResource, err error) { - err = fmt.Errorf("no networks available") - idx.yieldIP(func(n *NetworkResource, ip net.IP) (stop bool) { - // Convert the IP to a string - ipStr := ip.String() - - // Check if we would exceed the bandwidth cap - availBandwidth := idx.AvailBandwidth[n.Device] - usedBandwidth := idx.UsedBandwidth[n.Device] - if usedBandwidth+ask.MBits > availBandwidth { - err = fmt.Errorf("bandwidth exceeded") - return - } - - used := idx.UsedPorts[ipStr] - - // Check if any of the reserved ports are in use - for _, port := range ask.ReservedPorts { - // Guard against invalid port - if port.Value < 0 || port.Value >= maxValidPort { - err = fmt.Errorf("invalid port %d (out of range)", port.Value) - return - } - - // Check if in use - if used != nil && used.Check(uint(port.Value)) { - err = fmt.Errorf("reserved port collision") - return - } - } - - // Create the offer - offer := &NetworkResource{ - Device: n.Device, - IP: ipStr, - MBits: ask.MBits, - ReservedPorts: ask.ReservedPorts, - DynamicPorts: ask.DynamicPorts, - } - - // Try to stochastically pick the dynamic ports as it is faster and - // lower memory usage. - var dynPorts []int - var dynErr error - dynPorts, dynErr = getDynamicPortsStochastic(used, ask) - if dynErr == nil { - goto BUILD_OFFER - } - - // Fall back to the precise method if the random sampling failed. - dynPorts, dynErr = getDynamicPortsPrecise(used, ask) - if dynErr != nil { - err = dynErr - return - } - - BUILD_OFFER: - for i, port := range dynPorts { - offer.DynamicPorts[i].Value = port - } - - // Stop, we have an offer! - out = offer - err = nil - return true - }) - return -} - -// getDynamicPortsPrecise takes the nodes used port bitmap which may be nil if -// no ports have been allocated yet, the network ask and returns a set of unused -// ports to fullfil the ask's DynamicPorts or an error if it failed. An error -// means the ask can not be satisfied as the method does a precise search. -func getDynamicPortsPrecise(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { - // Create a copy of the used ports and apply the new reserves - var usedSet Bitmap - var err error - if nodeUsed != nil { - usedSet, err = nodeUsed.Copy() - if err != nil { - return nil, err - } - } else { - usedSet, err = NewBitmap(maxValidPort) - if err != nil { - return nil, err - } - } - - for _, port := range ask.ReservedPorts { - usedSet.Set(uint(port.Value)) - } - - // Get the indexes of the unset - availablePorts := usedSet.IndexesInRange(false, MinDynamicPort, MaxDynamicPort) - - // Randomize the amount we need - numDyn := len(ask.DynamicPorts) - if len(availablePorts) < numDyn { - return nil, fmt.Errorf("dynamic port selection failed") - } - - numAvailable := len(availablePorts) - for i := 0; i < numDyn; i++ { - j := rand.Intn(numAvailable) - availablePorts[i], availablePorts[j] = availablePorts[j], availablePorts[i] - } - - return availablePorts[:numDyn], nil -} - -// getDynamicPortsStochastic takes the nodes used port bitmap which may be nil if -// no ports have been allocated yet, the network ask and returns a set of unused -// ports to fullfil the ask's DynamicPorts or an error if it failed. An error -// does not mean the ask can not be satisfied as the method has a fixed amount -// of random probes and if these fail, the search is aborted. 
-func getDynamicPortsStochastic(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { - var reserved, dynamic []int - for _, port := range ask.ReservedPorts { - reserved = append(reserved, port.Value) - } - - for i := 0; i < len(ask.DynamicPorts); i++ { - attempts := 0 - PICK: - attempts++ - if attempts > maxRandPortAttempts { - return nil, fmt.Errorf("stochastic dynamic port selection failed") - } - - randPort := MinDynamicPort + rand.Intn(MaxDynamicPort-MinDynamicPort) - if nodeUsed != nil && nodeUsed.Check(uint(randPort)) { - goto PICK - } - - for _, ports := range [][]int{reserved, dynamic} { - if isPortReserved(ports, randPort) { - goto PICK - } - } - dynamic = append(dynamic, randPort) - } - - return dynamic, nil -} - -// IntContains scans an integer slice for a value -func isPortReserved(haystack []int, needle int) bool { - for _, item := range haystack { - if item == needle { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go deleted file mode 100644 index aab070055..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go +++ /dev/null @@ -1,94 +0,0 @@ -package structs - -import ( - "fmt" - "strings" - - "github.com/mitchellh/hashstructure" -) - -const ( - // NodeUniqueNamespace is a prefix that can be appended to node meta or - // attribute keys to mark them for exclusion in computed node class. - NodeUniqueNamespace = "unique." -) - -// UniqueNamespace takes a key and returns the key marked under the unique -// namespace. -func UniqueNamespace(key string) string { - return fmt.Sprintf("%s%s", NodeUniqueNamespace, key) -} - -// IsUniqueNamespace returns whether the key is under the unique namespace. -func IsUniqueNamespace(key string) bool { - return strings.HasPrefix(key, NodeUniqueNamespace) -} - -// ComputeClass computes a derived class for the node based on its attributes. -// ComputedClass is a unique id that identifies nodes with a common set of -// attributes and capabilities. Thus, when calculating a node's computed class -// we avoid including any uniquely identifing fields. -func (n *Node) ComputeClass() error { - hash, err := hashstructure.Hash(n, nil) - if err != nil { - return err - } - - n.ComputedClass = fmt.Sprintf("v1:%d", hash) - return nil -} - -// HashInclude is used to blacklist uniquely identifying node fields from being -// included in the computed node class. -func (n Node) HashInclude(field string, v interface{}) (bool, error) { - switch field { - case "Datacenter", "Attributes", "Meta", "NodeClass": - return true, nil - default: - return false, nil - } -} - -// HashIncludeMap is used to blacklist uniquely identifying node map keys from being -// included in the computed node class. -func (n Node) HashIncludeMap(field string, k, v interface{}) (bool, error) { - key, ok := k.(string) - if !ok { - return false, fmt.Errorf("map key %v not a string", k) - } - - switch field { - case "Meta", "Attributes": - return !IsUniqueNamespace(key), nil - default: - return false, fmt.Errorf("unexpected map field: %v", field) - } -} - -// EscapedConstraints takes a set of constraints and returns the set that -// escapes computed node classes. 
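
One consequence of the unique namespace worth spelling out: keys under unique. are excluded from the class hash, so two nodes that differ only in, say, unique.hostname metadata still land in the same computed class. A small sketch against the upstream package:

package sketch

import structs "github.com/hashicorp/nomad/nomad/structs"

// sameClass reports whether two nodes hash to the same computed class.
// Keys placed under the unique namespace (for example
// structs.UniqueNamespace("hostname") == "unique.hostname") never split
// nodes into different classes.
func sameClass(a, b *structs.Node) (bool, error) {
	if err := a.ComputeClass(); err != nil {
		return false, err
	}
	if err := b.ComputeClass(); err != nil {
		return false, err
	}
	return a.ComputedClass == b.ComputedClass, nil
}
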
-func EscapedConstraints(constraints []*Constraint) []*Constraint {
-	var escaped []*Constraint
-	for _, c := range constraints {
-		if constraintTargetEscapes(c.LTarget) || constraintTargetEscapes(c.RTarget) {
-			escaped = append(escaped, c)
-		}
-	}
-
-	return escaped
-}
-
-// constraintTargetEscapes returns whether the target of a constraint escapes
-// computed node class optimization.
-func constraintTargetEscapes(target string) bool {
-	switch {
-	case strings.HasPrefix(target, "${node.unique."):
-		return true
-	case strings.HasPrefix(target, "${attr.unique."):
-		return true
-	case strings.HasPrefix(target, "${meta.unique."):
-		return true
-	default:
-		return false
-	}
-}
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go
deleted file mode 100644
index 93b99f6fb..000000000
--- a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package structs
-
-import (
-	"github.com/hashicorp/raft"
-)
-
-// RaftServer has information about a server in the Raft configuration.
-type RaftServer struct {
-	// ID is the unique ID for the server. These are currently the same
-	// as the address, but they will be changed to a real GUID in a future
-	// release of Nomad.
-	ID raft.ServerID
-
-	// Node is the node name of the server, as known by Nomad, or this
-	// will be set to "(unknown)" otherwise.
-	Node string
-
-	// Address is the IP:port of the server, used for Raft communications.
-	Address raft.ServerAddress
-
-	// Leader is true if this server is the current cluster leader.
-	Leader bool
-
-	// Voter is true if this server has a vote in the cluster. This might
-	// be false if the server is staging and still coming online, or if
-	// it's a non-voting server, which will be added in a future release of
-	// Nomad.
-	Voter bool
-}
-
-// RaftConfigurationResponse is returned when querying for the current Raft
-// configuration.
-type RaftConfigurationResponse struct {
-	// Servers has the list of servers in the Raft configuration.
-	Servers []*RaftServer
-
-	// Index has the Raft index of this configuration.
-	Index uint64
-}
-
-// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft
-// operation on a specific Raft peer by address in the form of "IP:port".
-type RaftPeerByAddressRequest struct {
-	// Address is the peer to remove, in the form "IP:port".
-	Address raft.ServerAddress
-
-	// WriteRequest holds the Region for this request.
- WriteRequest -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go deleted file mode 100644 index 1ca19e35d..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go +++ /dev/null @@ -1,5783 +0,0 @@ -package structs - -import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "errors" - "fmt" - "io" - "net" - "os" - "path/filepath" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "golang.org/x/crypto/blake2b" - - "github.com/gorhill/cronexpr" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-version" - "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/helper/args" - "github.com/mitchellh/copystructure" - "github.com/ugorji/go/codec" - - hcodec "github.com/hashicorp/go-msgpack/codec" -) - -var ( - ErrNoLeader = fmt.Errorf("No cluster leader") - ErrNoRegionPath = fmt.Errorf("No path to region") - ErrTokenNotFound = errors.New("ACL token not found") - ErrPermissionDenied = errors.New("Permission denied") - - // validPolicyName is used to validate a policy name - validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$") -) - -type MessageType uint8 - -const ( - NodeRegisterRequestType MessageType = iota - NodeDeregisterRequestType - NodeUpdateStatusRequestType - NodeUpdateDrainRequestType - JobRegisterRequestType - JobDeregisterRequestType - EvalUpdateRequestType - EvalDeleteRequestType - AllocUpdateRequestType - AllocClientUpdateRequestType - ReconcileJobSummariesRequestType - VaultAccessorRegisterRequestType - VaultAccessorDegisterRequestType - ApplyPlanResultsRequestType - DeploymentStatusUpdateRequestType - DeploymentPromoteRequestType - DeploymentAllocHealthRequestType - DeploymentDeleteRequestType - JobStabilityRequestType - ACLPolicyUpsertRequestType - ACLPolicyDeleteRequestType - ACLTokenUpsertRequestType - ACLTokenDeleteRequestType - ACLTokenBootstrapRequestType -) - -const ( - // IgnoreUnknownTypeFlag is set along with a MessageType - // to indicate that the message type can be safely ignored - // if it is not recognized. This is for future proofing, so - // that new commands can be added in a way that won't cause - // old servers to crash when the FSM attempts to process them. - IgnoreUnknownTypeFlag MessageType = 128 - - // ApiMajorVersion is returned as part of the Status.Version request. - // It should be incremented anytime the APIs are changed in a way - // that would break clients for sane client versioning. - ApiMajorVersion = 1 - - // ApiMinorVersion is returned as part of the Status.Version request. - // It should be incremented anytime the APIs are changed to allow - // for sane client versioning. Minor changes should be compatible - // within the major version. - ApiMinorVersion = 1 - - ProtocolVersion = "protocol" - APIMajorVersion = "api.major" - APIMinorVersion = "api.minor" - - GetterModeAny = "any" - GetterModeFile = "file" - GetterModeDir = "dir" - - // maxPolicyDescriptionLength limits a policy description length - maxPolicyDescriptionLength = 256 - - // maxTokenNameLength limits a ACL token name length - maxTokenNameLength = 64 - - // ACLClientToken and ACLManagementToken are the only types of tokens - ACLClientToken = "client" - ACLManagementToken = "management" - - // DefaultNamespace is the default namespace. 
- DefaultNamespace = "default" - DefaultNamespaceDescription = "Default shared namespace" -) - -// Context defines the scope in which a search for Nomad object operates, and -// is also used to query the matching index value for this context -type Context string - -const ( - Allocs Context = "allocs" - Deployments Context = "deployment" - Evals Context = "evals" - Jobs Context = "jobs" - Nodes Context = "nodes" - Namespaces Context = "namespaces" - All Context = "all" -) - -// NamespacedID is a tuple of an ID and a namespace -type NamespacedID struct { - ID string - Namespace string -} - -// RPCInfo is used to describe common information about query -type RPCInfo interface { - RequestRegion() string - IsRead() bool - AllowStaleRead() bool -} - -// QueryOptions is used to specify various flags for read queries -type QueryOptions struct { - // The target region for this query - Region string - - // Namespace is the target namespace for the query. - Namespace string - - // If set, wait until query exceeds given index. Must be provided - // with MaxQueryTime. - MinQueryIndex uint64 - - // Provided with MinQueryIndex to wait for change. - MaxQueryTime time.Duration - - // If set, any follower can service the request. Results - // may be arbitrarily stale. - AllowStale bool - - // If set, used as prefix for resource list searches - Prefix string - - // SecretID is secret portion of the ACL token used for the request - SecretID string -} - -func (q QueryOptions) RequestRegion() string { - return q.Region -} - -func (q QueryOptions) RequestNamespace() string { - if q.Namespace == "" { - return DefaultNamespace - } - return q.Namespace -} - -// QueryOption only applies to reads, so always true -func (q QueryOptions) IsRead() bool { - return true -} - -func (q QueryOptions) AllowStaleRead() bool { - return q.AllowStale -} - -type WriteRequest struct { - // The target region for this write - Region string - - // Namespace is the target namespace for the write. - Namespace string - - // SecretID is secret portion of the ACL token used for the request - SecretID string -} - -func (w WriteRequest) RequestRegion() string { - // The target region for this request - return w.Region -} - -func (w WriteRequest) RequestNamespace() string { - if w.Namespace == "" { - return DefaultNamespace - } - return w.Namespace -} - -// WriteRequest only applies to writes, always false -func (w WriteRequest) IsRead() bool { - return false -} - -func (w WriteRequest) AllowStaleRead() bool { - return false -} - -// QueryMeta allows a query response to include potentially -// useful metadata about a query -type QueryMeta struct { - // This is the index associated with the read - Index uint64 - - // If AllowStale is used, this is time elapsed since - // last contact between the follower and leader. This - // can be used to gauge staleness. - LastContact time.Duration - - // Used to indicate if there is a known leader node - KnownLeader bool -} - -// WriteMeta allows a write response to include potentially -// useful metadata about the write -type WriteMeta struct { - // This is the index associated with the write - Index uint64 -} - -// NodeRegisterRequest is used for Node.Register endpoint -// to register a node as being a schedulable entity. -type NodeRegisterRequest struct { - Node *Node - WriteRequest -} - -// NodeDeregisterRequest is used for Node.Deregister endpoint -// to deregister a node as being a schedulable entity. 
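
MinQueryIndex and MaxQueryTime are the blocking-query knobs: a reader passes the last index it saw and the server holds the request until state moves past that index or the timer fires. A sketch of the resulting client-side long-poll loop; fetch stands in for whatever RPC performs the read:

package sketch

import (
	"time"

	structs "github.com/hashicorp/nomad/nomad/structs"
)

// watch long-polls a read endpoint: each response's index becomes the
// MinQueryIndex of the next request, so the server only answers once
// something changes (or MaxQueryTime elapses).
func watch(fetch func(structs.QueryOptions) (structs.QueryMeta, error)) error {
	opts := structs.QueryOptions{
		Region:       "global",
		MaxQueryTime: 5 * time.Minute,
	}
	for {
		meta, err := fetch(opts)
		if err != nil {
			return err
		}
		opts.MinQueryIndex = meta.Index
	}
}
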
-type NodeDeregisterRequest struct {
-	NodeID string
-	WriteRequest
-}
-
-// NodeServerInfo is used in NodeUpdateResponse to return Nomad server
-// information used in RPC server lists.
-type NodeServerInfo struct {
-	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
-	// be contacted at for RPCs.
-	RPCAdvertiseAddr string
-
-	// RpcMajorVersion is the major version number the Nomad Server
-	// supports
-	RPCMajorVersion int32
-
-	// RpcMinorVersion is the minor version number the Nomad Server
-	// supports
-	RPCMinorVersion int32
-
-	// Datacenter is the datacenter that a Nomad server belongs to
-	Datacenter string
-}
-
-// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
-// to update the status of a node.
-type NodeUpdateStatusRequest struct {
-	NodeID string
-	Status string
-	WriteRequest
-}
-
-// NodeUpdateDrainRequest is used for updating the drain status
-type NodeUpdateDrainRequest struct {
-	NodeID string
-	Drain  bool
-	WriteRequest
-}
-
-// NodeEvaluateRequest is used to re-evaluate the node
-type NodeEvaluateRequest struct {
-	NodeID string
-	WriteRequest
-}
-
-// NodeSpecificRequest is used when we just need to specify a target node
-type NodeSpecificRequest struct {
-	NodeID   string
-	SecretID string
-	QueryOptions
-}
-
-// SearchResponse is used to return matches and information about whether
-// the match list is truncated specific to each type of context.
-type SearchResponse struct {
-	// Map of context types to ids which match a specified prefix
-	Matches map[Context][]string
-
-	// Truncations indicates whether the matches for a particular context have
-	// been truncated
-	Truncations map[Context]bool
-
-	QueryMeta
-}
-
-// SearchRequest is used to parameterize a request, and returns a
-// list of matches made up of jobs, allocations, evaluations, and/or nodes,
-// along with whether or not the information returned is truncated.
-type SearchRequest struct {
-	// Prefix is what ids are matched to. I.e., if the given prefix were
-	// "a", potential matches might be "abcd" or "aabb"
-	Prefix string
-
-	// Context is the type that can be matched against. A context can be a job,
-	// node, evaluation, allocation, or empty (indicating every context should be
-	// matched)
-	Context Context
-
-	QueryOptions
-}
-
-// JobRegisterRequest is used for Job.Register endpoint
-// to register a job as being a schedulable entity.
-type JobRegisterRequest struct {
-	Job *Job
-
-	// If EnforceIndex is set then the job will only be registered if the passed
-	// JobModifyIndex matches the current Jobs index. If the index is zero, the
-	// register only occurs if the job is new.
-	EnforceIndex   bool
-	JobModifyIndex uint64
-
-	// PolicyOverride is set when the user is attempting to override any policies
-	PolicyOverride bool
-
-	WriteRequest
-}
-
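
EnforceIndex makes registration a check-and-set: read the job, remember its modify index, and the register only applies if that index is still current (an index of zero insists the job does not exist yet). A sketch of building such a request against the upstream package:

package sketch

import structs "github.com/hashicorp/nomad/nomad/structs"

// casRegister builds a register request that only applies if the job is
// unchanged since we read it at lastIndex; lastIndex == 0 insists the job
// is new.
func casRegister(job *structs.Job, lastIndex uint64) *structs.JobRegisterRequest {
	return &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: lastIndex,
	}
}
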
-// JobDeregisterRequest is used for Job.Deregister endpoint
-// to deregister a job as being a schedulable entity.
-type JobDeregisterRequest struct {
-	JobID string
-
-	// Purge controls whether the deregister purges the job from the system or
-	// whether the job is just marked as stopped and will be removed by the
-	// garbage collector
-	Purge bool
-
-	WriteRequest
-}
-
-// JobEvaluateRequest is used when we just need to re-evaluate a target job
-type JobEvaluateRequest struct {
-	JobID string
-	WriteRequest
-}
-
-// JobSpecificRequest is used when we just need to specify a target job
-type JobSpecificRequest struct {
-	JobID     string
-	AllAllocs bool
-	QueryOptions
-}
-
-// JobListRequest is used to parameterize a list request
-type JobListRequest struct {
-	QueryOptions
-}
-
-// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
-// evaluation of the Job.
-type JobPlanRequest struct {
-	Job  *Job
-	Diff bool // Toggles an annotated diff
-	// PolicyOverride is set when the user is attempting to override any policies
-	PolicyOverride bool
-	WriteRequest
-}
-
-// JobSummaryRequest is used when we just need to get a specific job summary
-type JobSummaryRequest struct {
-	JobID string
-	QueryOptions
-}
-
-// JobDispatchRequest is used to dispatch a job based on a parameterized job
-type JobDispatchRequest struct {
-	JobID   string
-	Payload []byte
-	Meta    map[string]string
-	WriteRequest
-}
-
-// JobValidateRequest is used to validate a job
-type JobValidateRequest struct {
-	Job *Job
-	WriteRequest
-}
-
-// JobRevertRequest is used to revert a job to a prior version.
-type JobRevertRequest struct {
-	// JobID is the ID of the job being reverted
-	JobID string
-
-	// JobVersion is the version to revert to.
-	JobVersion uint64
-
-	// EnforcePriorVersion if set will enforce that the job is at the given
-	// version before reverting.
-	EnforcePriorVersion *uint64
-
-	WriteRequest
-}
-
-// JobStabilityRequest is used to mark a job as stable.
-type JobStabilityRequest struct {
-	// Job to set the stability on
-	JobID      string
-	JobVersion uint64
-
-	// Set the stability
-	Stable bool
-	WriteRequest
-}
-
-// JobStabilityResponse is the response when marking a job as stable.
-type JobStabilityResponse struct {
-	WriteMeta
-}
-
-// NodeListRequest is used to parameterize a list request
-type NodeListRequest struct {
-	QueryOptions
-}
-
-// EvalUpdateRequest is used for upserting evaluations.
-type EvalUpdateRequest struct {
-	Evals     []*Evaluation
-	EvalToken string
-	WriteRequest
-}
-
-// EvalDeleteRequest is used for deleting an evaluation.
-type EvalDeleteRequest struct {
-	Evals  []string
-	Allocs []string
-	WriteRequest
-}
-
-// EvalSpecificRequest is used when we just need to specify a target evaluation
-type EvalSpecificRequest struct {
-	EvalID string
-	QueryOptions
-}
-
-// EvalAckRequest is used to Ack/Nack a specific evaluation
-type EvalAckRequest struct {
-	EvalID string
-	Token  string
-	WriteRequest
-}
-
-// EvalDequeueRequest is used when we want to dequeue an evaluation
-type EvalDequeueRequest struct {
-	Schedulers       []string
-	Timeout          time.Duration
-	SchedulerVersion uint16
-	WriteRequest
-}
-
-// EvalListRequest is used to list the evaluations
-type EvalListRequest struct {
-	QueryOptions
-}
-
-// PlanRequest is used to submit an allocation plan to the leader
-type PlanRequest struct {
-	Plan *Plan
-	WriteRequest
-}
-
-// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
-// committing the result of a plan.
-type ApplyPlanResultsRequest struct {
-	// AllocUpdateRequest holds the allocation updates to be made by the
-	// scheduler.
-	AllocUpdateRequest
-
-	// Deployment is the deployment created or updated as a result of a
-	// scheduling event.
-	Deployment *Deployment
-
-	// DeploymentUpdates is a set of status updates to apply to the given
-	// deployments. This allows the scheduler to cancel any unneeded deployment
-	// because the job is stopped or the update block is removed.
-	DeploymentUpdates []*DeploymentStatusUpdate
-}
-
-// AllocUpdateRequest is used to submit changes to allocations, either
-// to cause evictions or to assign new allocations. Both can be done
-// within a single transaction
-type AllocUpdateRequest struct {
-	// Alloc is the list of new allocations to assign
-	Alloc []*Allocation
-
-	// Job is the shared parent job of the allocations.
-	// It is pulled out since it is common to reduce payload size.
-	Job *Job
-
-	WriteRequest
-}
-
-// AllocListRequest is used to request a list of allocations
-type AllocListRequest struct {
-	QueryOptions
-}
-
-// AllocSpecificRequest is used to query a specific allocation
-type AllocSpecificRequest struct {
-	AllocID string
-	QueryOptions
-}
-
-// AllocsGetRequest is used to query a set of allocations
-type AllocsGetRequest struct {
-	AllocIDs []string
-	QueryOptions
-}
-
-// PeriodicForceRequest is used to force a specific periodic job.
-type PeriodicForceRequest struct {
-	JobID string
-	WriteRequest
-}
-
-// ServerMembersResponse has the list of servers in a cluster
-type ServerMembersResponse struct {
-	ServerName   string
-	ServerRegion string
-	ServerDC     string
-	Members      []*ServerMember
-}
-
-// ServerMember holds information about a Nomad server agent in a cluster
-type ServerMember struct {
-	Name        string
-	Addr        net.IP
-	Port        uint16
-	Tags        map[string]string
-	Status      string
-	ProtocolMin uint8
-	ProtocolMax uint8
-	ProtocolCur uint8
-	DelegateMin uint8
-	DelegateMax uint8
-	DelegateCur uint8
-}
-
-// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
-// following tasks in the given allocation
-type DeriveVaultTokenRequest struct {
-	NodeID   string
-	SecretID string
-	AllocID  string
-	Tasks    []string
-	QueryOptions
-}
-
-// VaultAccessorsRequest is used to operate on a set of Vault accessors
-type VaultAccessorsRequest struct {
-	Accessors []*VaultAccessor
-}
-
-// VaultAccessor is a reference to a created Vault token on behalf of
-// an allocation's task.
-type VaultAccessor struct {
-	AllocID     string
-	Task        string
-	NodeID      string
-	Accessor    string
-	CreationTTL int
-
-	// Raft Indexes
-	CreateIndex uint64
-}
-
-// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
-type DeriveVaultTokenResponse struct {
-	// Tasks is a mapping between the task name and the wrapped token
-	Tasks map[string]string
-
-	// Error stores any error that occurred. Errors are stored here so we can
-	// communicate whether it is retriable
-	Error *RecoverableError
-
-	QueryMeta
-}
-
-// GenericRequest is used for requests where no
-// specific information is needed.
-type GenericRequest struct {
-	QueryOptions
-}
-
-// DeploymentListRequest is used to list the deployments
-type DeploymentListRequest struct {
-	QueryOptions
-}
-
-// DeploymentDeleteRequest is used for deleting deployments.
-type DeploymentDeleteRequest struct {
-	Deployments []string
-	WriteRequest
-}
-
-// DeploymentStatusUpdateRequest is used to update the status of a deployment as
-// well as optionally creating an evaluation atomically.
-type DeploymentStatusUpdateRequest struct {
-	// Eval, if set, is used to create an evaluation at the same time as
-	// updating the status of a deployment.
-	Eval *Evaluation
-
-	// DeploymentUpdate is a status update to apply to the given
-	// deployment.
-	DeploymentUpdate *DeploymentStatusUpdate
-
-	// Job is used to optionally upsert a job. This is used when setting the
-	// allocation health results in a deployment failure and the deployment
-	// auto-reverts to the latest stable job.
-	Job *Job
-}
-
-// DeploymentAllocHealthRequest is used to set the health of a set of
-// allocations as part of a deployment.
-type DeploymentAllocHealthRequest struct {
-	DeploymentID string
-
-	// Marks these allocations as healthy, allowing further allocations
-	// to be rolled.
-	HealthyAllocationIDs []string
-
-	// Any unhealthy allocations fail the deployment
-	UnhealthyAllocationIDs []string
-
-	WriteRequest
-}
-
-// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
-type ApplyDeploymentAllocHealthRequest struct {
-	DeploymentAllocHealthRequest
-
-	// An optional field to update the status of a deployment
-	DeploymentUpdate *DeploymentStatusUpdate
-
-	// Job is used to optionally upsert a job. This is used when setting the
-	// allocation health results in a deployment failure and the deployment
-	// auto-reverts to the latest stable job.
-	Job *Job
-
-	// An optional evaluation to create after promoting the canaries
-	Eval *Evaluation
-}
-
-// DeploymentPromoteRequest is used to promote task groups in a deployment
-type DeploymentPromoteRequest struct {
-	DeploymentID string
-
-	// All is to promote all task groups
-	All bool
-
-	// Groups is used to set the promotion status per task group
-	Groups []string
-
-	WriteRequest
-}
-
-// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
-type ApplyDeploymentPromoteRequest struct {
-	DeploymentPromoteRequest
-
-	// An optional evaluation to create after promoting the canaries
-	Eval *Evaluation
-}
-
-// DeploymentPauseRequest is used to pause a deployment
-type DeploymentPauseRequest struct {
-	DeploymentID string
-
-	// Pause sets the pause status
-	Pause bool
-
-	WriteRequest
-}
-
-// DeploymentSpecificRequest is used to make a request specific to a particular
-// deployment
-type DeploymentSpecificRequest struct {
-	DeploymentID string
-	QueryOptions
-}
-
-// DeploymentFailRequest is used to fail a particular deployment
-type DeploymentFailRequest struct {
-	DeploymentID string
-	WriteRequest
-}
-
-// SingleDeploymentResponse is used to respond with a single deployment
-type SingleDeploymentResponse struct {
-	Deployment *Deployment
-	QueryMeta
-}
-
-// GenericResponse is used to respond to a request where no
-// specific response information is needed.
-type GenericResponse struct {
-	WriteMeta
-}
-
-// VersionResponse is used for the Status.Version response
-type VersionResponse struct {
-	Build    string
-	Versions map[string]int
-	QueryMeta
-}
-
-// JobRegisterResponse is used to respond to a job registration
-type JobRegisterResponse struct {
-	EvalID          string
-	EvalCreateIndex uint64
-	JobModifyIndex  uint64
-
-	// Warnings contains any warnings about the given job. These may include
-	// deprecation warnings.
- Warnings string - - QueryMeta -} - -// JobDeregisterResponse is used to respond to a job deregistration -type JobDeregisterResponse struct { - EvalID string - EvalCreateIndex uint64 - JobModifyIndex uint64 - QueryMeta -} - -// JobValidateResponse is the response from validate request -type JobValidateResponse struct { - // DriverConfigValidated indicates whether the agent validated the driver - // config - DriverConfigValidated bool - - // ValidationErrors is a list of validation errors - ValidationErrors []string - - // Error is a string version of any error that may have occurred - Error string - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. - Warnings string -} - -// NodeUpdateResponse is used to respond to a node update -type NodeUpdateResponse struct { - HeartbeatTTL time.Duration - EvalIDs []string - EvalCreateIndex uint64 - NodeModifyIndex uint64 - - // LeaderRPCAddr is the RPC address of the current Raft Leader. If - // empty, the current Nomad Server is in the minority of a partition. - LeaderRPCAddr string - - // NumNodes is the number of Nomad nodes attached to this quorum of - // Nomad Servers at the time of the response. This value can - // fluctuate based on the health of the cluster between heartbeats. - NumNodes int32 - - // Servers is the full list of known Nomad servers in the local - // region. - Servers []*NodeServerInfo - - QueryMeta -} - -// NodeDrainUpdateResponse is used to respond to a node drain update -type NodeDrainUpdateResponse struct { - EvalIDs []string - EvalCreateIndex uint64 - NodeModifyIndex uint64 - QueryMeta -} - -// NodeAllocsResponse is used to return allocs for a single node -type NodeAllocsResponse struct { - Allocs []*Allocation - QueryMeta -} - -// NodeClientAllocsResponse is used to return allocs meta data for a single node -type NodeClientAllocsResponse struct { - Allocs map[string]uint64 - QueryMeta -} - -// SingleNodeResponse is used to return a single node -type SingleNodeResponse struct { - Node *Node - QueryMeta -} - -// NodeListResponse is used for a list request -type NodeListResponse struct { - Nodes []*NodeListStub - QueryMeta -} - -// SingleJobResponse is used to return a single job -type SingleJobResponse struct { - Job *Job - QueryMeta -} - -// JobSummaryResponse is used to return a single job summary -type JobSummaryResponse struct { - JobSummary *JobSummary - QueryMeta -} - -type JobDispatchResponse struct { - DispatchedJobID string - EvalID string - EvalCreateIndex uint64 - JobCreateIndex uint64 - WriteMeta -} - -// JobListResponse is used for a list request -type JobListResponse struct { - Jobs []*JobListStub - QueryMeta -} - -// JobVersionsRequest is used to get a jobs versions -type JobVersionsRequest struct { - JobID string - Diffs bool - QueryOptions -} - -// JobVersionsResponse is used for a job get versions request -type JobVersionsResponse struct { - Versions []*Job - Diffs []*JobDiff - QueryMeta -} - -// JobPlanResponse is used to respond to a job plan request -type JobPlanResponse struct { - // Annotations stores annotations explaining decisions the scheduler made. - Annotations *PlanAnnotations - - // FailedTGAllocs is the placement failures per task group. - FailedTGAllocs map[string]*AllocMetric - - // JobModifyIndex is the modification index of the job. The value can be - // used when running `nomad run` to ensure that the Job wasn’t modified - // since the last plan. If the job is being created, the value is zero. 
- JobModifyIndex uint64 - - // CreatedEvals is the set of evaluations created by the scheduler. The - // reasons for this can be rolling-updates or blocked evals. - CreatedEvals []*Evaluation - - // Diff contains the diff of the job and annotations on whether the change - // causes an in-place update or create/destroy - Diff *JobDiff - - // NextPeriodicLaunch is the time duration till the job would be launched if - // submitted. - NextPeriodicLaunch time.Time - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. - Warnings string - - WriteMeta -} - -// SingleAllocResponse is used to return a single allocation -type SingleAllocResponse struct { - Alloc *Allocation - QueryMeta -} - -// AllocsGetResponse is used to return a set of allocations -type AllocsGetResponse struct { - Allocs []*Allocation - QueryMeta -} - -// JobAllocationsResponse is used to return the allocations for a job -type JobAllocationsResponse struct { - Allocations []*AllocListStub - QueryMeta -} - -// JobEvaluationsResponse is used to return the evaluations for a job -type JobEvaluationsResponse struct { - Evaluations []*Evaluation - QueryMeta -} - -// SingleEvalResponse is used to return a single evaluation -type SingleEvalResponse struct { - Eval *Evaluation - QueryMeta -} - -// EvalDequeueResponse is used to return from a dequeue -type EvalDequeueResponse struct { - Eval *Evaluation - Token string - - // WaitIndex is the Raft index the worker should wait until invoking the - // scheduler. - WaitIndex uint64 - - QueryMeta -} - -// GetWaitIndex is used to retrieve the Raft index in which state should be at -// or beyond before invoking the scheduler. -func (e *EvalDequeueResponse) GetWaitIndex() uint64 { - // Prefer the wait index sent. This will be populated on all responses from - // 0.7.0 and above - if e.WaitIndex != 0 { - return e.WaitIndex - } else if e.Eval != nil { - return e.Eval.ModifyIndex - } - - // This should never happen - return 1 -} - -// PlanResponse is used to return from a PlanRequest -type PlanResponse struct { - Result *PlanResult - WriteMeta -} - -// AllocListResponse is used for a list request -type AllocListResponse struct { - Allocations []*AllocListStub - QueryMeta -} - -// DeploymentListResponse is used for a list request -type DeploymentListResponse struct { - Deployments []*Deployment - QueryMeta -} - -// EvalListResponse is used for a list request -type EvalListResponse struct { - Evaluations []*Evaluation - QueryMeta -} - -// EvalAllocationsResponse is used to return the allocations for an evaluation -type EvalAllocationsResponse struct { - Allocations []*AllocListStub - QueryMeta -} - -// PeriodicForceResponse is used to respond to a periodic job force launch -type PeriodicForceResponse struct { - EvalID string - EvalCreateIndex uint64 - WriteMeta -} - -// DeploymentUpdateResponse is used to respond to a deployment change. The -// response will include the modify index of the deployment as well as details -// of any triggered evaluation. -type DeploymentUpdateResponse struct { - EvalID string - EvalCreateIndex uint64 - DeploymentModifyIndex uint64 - - // RevertedJobVersion is the version the job was reverted to. If unset, the - // job wasn't reverted - RevertedJobVersion *uint64 - - WriteMeta -} - -const ( - NodeStatusInit = "initializing" - NodeStatusReady = "ready" - NodeStatusDown = "down" -) - -// ShouldDrainNode checks if a given node status should trigger an -// evaluation. Some states don't require any further action. 
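Worth pinning down the fallback order in EvalDequeueResponse.GetWaitIndex above, since it encodes a compatibility rule: an explicit WaitIndex (populated by servers from 0.7.0 on) wins, otherwise the evaluation's ModifyIndex, otherwise the defensive 1. A self-contained sketch with stand-in types that mirrors that logic:

    package main

    import "fmt"

    type Evaluation struct{ ModifyIndex uint64 }

    type EvalDequeueResponse struct {
    	Eval      *Evaluation
    	WaitIndex uint64
    }

    func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
    	if e.WaitIndex != 0 {
    		return e.WaitIndex
    	} else if e.Eval != nil {
    		return e.Eval.ModifyIndex
    	}
    	return 1
    }

    func main() {
    	cases := []EvalDequeueResponse{
    		{WaitIndex: 42, Eval: &Evaluation{ModifyIndex: 7}}, // explicit index wins
    		{Eval: &Evaluation{ModifyIndex: 7}},                // pre-0.7.0 server: fall back to the eval
    		{},                                                 // "should never happen": defensive 1
    	}
    	for i := range cases {
    		fmt.Println(cases[i].GetWaitIndex()) // 42, then 7, then 1
    	}
    }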
-func ShouldDrainNode(status string) bool {
-	switch status {
-	case NodeStatusInit, NodeStatusReady:
-		return false
-	case NodeStatusDown:
-		return true
-	default:
-		panic(fmt.Sprintf("unhandled node status %s", status))
-	}
-}
-
-// ValidNodeStatus is used to check if a node status is valid
-func ValidNodeStatus(status string) bool {
-	switch status {
-	case NodeStatusInit, NodeStatusReady, NodeStatusDown:
-		return true
-	default:
-		return false
-	}
-}
-
-// Node is a representation of a schedulable client node
-type Node struct {
-	// ID is a unique identifier for the node. It can be constructed
-	// by doing a concatenation of the Name and Datacenter as a simple
-	// approach. Alternatively a UUID may be used.
-	ID string
-
-	// SecretID is an ID that is only known by the Node and the set of Servers.
-	// It is not accessible via the API and is used to authenticate nodes
-	// conducting privileged activities.
-	SecretID string
-
-	// Datacenter for this node
-	Datacenter string
-
-	// Node name
-	Name string
-
-	// HTTPAddr is the address on which the Nomad client is listening for http
-	// requests
-	HTTPAddr string
-
-	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
-	TLSEnabled bool
-
-	// Attributes is an arbitrary set of key/value
-	// data that can be used for constraints. Examples
-	// include "kernel.name=linux", "arch=386", "driver.docker=1",
-	// "docker.runtime=1.8.3"
-	Attributes map[string]string
-
-	// Resources is the available resources on the client.
-	// For example 'cpu=2' 'memory=2048'
-	Resources *Resources
-
-	// Reserved is the set of resources that are reserved,
-	// and should be subtracted from the total resources for
-	// the purposes of scheduling. This may be used to provide certain
-	// high-watermark tolerances or because of external schedulers
-	// consuming resources.
-	Reserved *Resources
-
-	// Links are used to 'link' this client to external
-	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
-	// 'ami=ami-123'
-	Links map[string]string
-
-	// Meta is used to associate arbitrary metadata with this
-	// client. This is opaque to Nomad.
-	Meta map[string]string
-
-	// NodeClass is an opaque identifier used to group nodes
-	// together for the purpose of determining scheduling pressure.
-	NodeClass string
-
-	// ComputedClass is a unique id that identifies nodes with a common set of
-	// attributes and capabilities.
-	ComputedClass string
-
-	// Drain is controlled by the servers, and not the client.
-	// If true, no jobs will be scheduled to this node, and existing
-	// allocations will be drained.
-	Drain bool
-
-	// Status of this node
-	Status string
-
-	// StatusDescription is meant to provide more human-readable information
-	StatusDescription string
-
-	// StatusUpdatedAt is the time stamp at which the state of the node was
-	// updated
-	StatusUpdatedAt int64
-
-	// Raft Indexes
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// Ready returns if the node is ready for running allocations
-func (n *Node) Ready() bool {
-	return n.Status == NodeStatusReady && !n.Drain
-}
-
-func (n *Node) Copy() *Node {
-	if n == nil {
-		return nil
-	}
-	nn := new(Node)
-	*nn = *n
-	nn.Attributes = helper.CopyMapStringString(nn.Attributes)
-	nn.Resources = nn.Resources.Copy()
-	nn.Reserved = nn.Reserved.Copy()
-	nn.Links = helper.CopyMapStringString(nn.Links)
-	nn.Meta = helper.CopyMapStringString(nn.Meta)
-	return nn
-}
-
-// TerminalStatus returns if the current status is terminal and
-// will no longer transition.
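The reason Node.Copy above runs every map field through helper.CopyMapStringString rather than relying on the struct assignment: copying a struct copies map headers, not contents, so both nodes would alias the same backing storage. A quick self-contained illustration:

    package main

    import "fmt"

    type node struct{ Meta map[string]string }

    func main() {
    	a := node{Meta: map[string]string{"rack": "r1"}}
    	b := a // shallow: b.Meta aliases a.Meta
    	b.Meta["rack"] = "r2"
    	fmt.Println(a.Meta["rack"]) // "r2": the mutation leaked into the original

    	c := node{Meta: make(map[string]string, len(a.Meta))}
    	for k, v := range a.Meta { // deep copy, as helper.CopyMapStringString does
    		c.Meta[k] = v
    	}
    	c.Meta["rack"] = "r3"
    	fmt.Println(a.Meta["rack"]) // still "r2"
    }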
-func (n *Node) TerminalStatus() bool { - switch n.Status { - case NodeStatusDown: - return true - default: - return false - } -} - -// Stub returns a summarized version of the node -func (n *Node) Stub() *NodeListStub { - return &NodeListStub{ - ID: n.ID, - Datacenter: n.Datacenter, - Name: n.Name, - NodeClass: n.NodeClass, - Version: n.Attributes["nomad.version"], - Drain: n.Drain, - Status: n.Status, - StatusDescription: n.StatusDescription, - CreateIndex: n.CreateIndex, - ModifyIndex: n.ModifyIndex, - } -} - -// NodeListStub is used to return a subset of job information -// for the job list -type NodeListStub struct { - ID string - Datacenter string - Name string - NodeClass string - Version string - Drain bool - Status string - StatusDescription string - CreateIndex uint64 - ModifyIndex uint64 -} - -// Networks defined for a task on the Resources struct. -type Networks []*NetworkResource - -// Port assignment and IP for the given label or empty values. -func (ns Networks) Port(label string) (string, int) { - for _, n := range ns { - for _, p := range n.ReservedPorts { - if p.Label == label { - return n.IP, p.Value - } - } - for _, p := range n.DynamicPorts { - if p.Label == label { - return n.IP, p.Value - } - } - } - return "", 0 -} - -// Resources is used to define the resources available -// on a client -type Resources struct { - CPU int - MemoryMB int - DiskMB int - IOPS int - Networks Networks -} - -const ( - BytesInMegabyte = 1024 * 1024 -) - -// DefaultResources returns the default resources for a task. -func DefaultResources() *Resources { - return &Resources{ - CPU: 100, - MemoryMB: 10, - IOPS: 0, - } -} - -// DiskInBytes returns the amount of disk resources in bytes. -func (r *Resources) DiskInBytes() int64 { - return int64(r.DiskMB * BytesInMegabyte) -} - -// Merge merges this resource with another resource. -func (r *Resources) Merge(other *Resources) { - if other.CPU != 0 { - r.CPU = other.CPU - } - if other.MemoryMB != 0 { - r.MemoryMB = other.MemoryMB - } - if other.DiskMB != 0 { - r.DiskMB = other.DiskMB - } - if other.IOPS != 0 { - r.IOPS = other.IOPS - } - if len(other.Networks) != 0 { - r.Networks = other.Networks - } -} - -func (r *Resources) Canonicalize() { - // Ensure that an empty and nil slices are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(r.Networks) == 0 { - r.Networks = nil - } - - for _, n := range r.Networks { - n.Canonicalize() - } -} - -// MeetsMinResources returns an error if the resources specified are less than -// the minimum allowed. 
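Usage of the Networks.Port lookup defined above: within each network, reserved ports are checked before dynamic ones, and a miss returns the zero values. A self-contained sketch with the types re-declared locally:

    package main

    import "fmt"

    type Port struct {
    	Label string
    	Value int
    }

    type NetworkResource struct {
    	IP            string
    	ReservedPorts []Port
    	DynamicPorts  []Port
    }

    type Networks []*NetworkResource

    // Port mirrors the lookup above: the first matching reserved or dynamic
    // port label wins; empty values mean "not found".
    func (ns Networks) Port(label string) (string, int) {
    	for _, n := range ns {
    		for _, p := range n.ReservedPorts {
    			if p.Label == label {
    				return n.IP, p.Value
    			}
    		}
    		for _, p := range n.DynamicPorts {
    			if p.Label == label {
    				return n.IP, p.Value
    			}
    		}
    	}
    	return "", 0
    }

    func main() {
    	ns := Networks{{
    		IP:            "10.0.0.5",
    		ReservedPorts: []Port{{Label: "http", Value: 8080}},
    		DynamicPorts:  []Port{{Label: "rpc", Value: 23456}},
    	}}
    	ip, port := ns.Port("rpc")
    	fmt.Println(ip, port) // 10.0.0.5 23456
    	ip, port = ns.Port("missing")
    	fmt.Println(ip == "", port == 0) // true true
    }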
-func (r *Resources) MeetsMinResources() error { - var mErr multierror.Error - if r.CPU < 20 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is 20; got %d", r.CPU)) - } - if r.MemoryMB < 10 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is 10; got %d", r.MemoryMB)) - } - if r.IOPS < 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is 0; got %d", r.IOPS)) - } - for i, n := range r.Networks { - if err := n.MeetsMinResources(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err)) - } - } - - return mErr.ErrorOrNil() -} - -// Copy returns a deep copy of the resources -func (r *Resources) Copy() *Resources { - if r == nil { - return nil - } - newR := new(Resources) - *newR = *r - if r.Networks != nil { - n := len(r.Networks) - newR.Networks = make([]*NetworkResource, n) - for i := 0; i < n; i++ { - newR.Networks[i] = r.Networks[i].Copy() - } - } - return newR -} - -// NetIndex finds the matching net index using device name -func (r *Resources) NetIndex(n *NetworkResource) int { - for idx, net := range r.Networks { - if net.Device == n.Device { - return idx - } - } - return -1 -} - -// Superset checks if one set of resources is a superset -// of another. This ignores network resources, and the NetworkIndex -// should be used for that. -func (r *Resources) Superset(other *Resources) (bool, string) { - if r.CPU < other.CPU { - return false, "cpu exhausted" - } - if r.MemoryMB < other.MemoryMB { - return false, "memory exhausted" - } - if r.DiskMB < other.DiskMB { - return false, "disk exhausted" - } - if r.IOPS < other.IOPS { - return false, "iops exhausted" - } - return true, "" -} - -// Add adds the resources of the delta to this, potentially -// returning an error if not possible. -func (r *Resources) Add(delta *Resources) error { - if delta == nil { - return nil - } - r.CPU += delta.CPU - r.MemoryMB += delta.MemoryMB - r.DiskMB += delta.DiskMB - r.IOPS += delta.IOPS - - for _, n := range delta.Networks { - // Find the matching interface by IP or CIDR - idx := r.NetIndex(n) - if idx == -1 { - r.Networks = append(r.Networks, n.Copy()) - } else { - r.Networks[idx].Add(n) - } - } - return nil -} - -func (r *Resources) GoString() string { - return fmt.Sprintf("*%#v", *r) -} - -type Port struct { - Label string - Value int -} - -// NetworkResource is used to represent available network -// resources -type NetworkResource struct { - Device string // Name of the device - CIDR string // CIDR block of addresses - IP string // Host IP address - MBits int // Throughput - ReservedPorts []Port // Host Reserved ports - DynamicPorts []Port // Host Dynamically assigned ports -} - -func (n *NetworkResource) Canonicalize() { - // Ensure that an empty and nil slices are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(n.ReservedPorts) == 0 { - n.ReservedPorts = nil - } - if len(n.DynamicPorts) == 0 { - n.DynamicPorts = nil - } -} - -// MeetsMinResources returns an error if the resources specified are less than -// the minimum allowed. 
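The Superset and Add helpers above form a feasibility-then-commit pair: Superset answers whether an ask still fits (ignoring networks, which go through the NetworkIndex), and Add accumulates it. A minimal sketch with a simplified stand-in Resources type, showing how a placement check might use them:

    package main

    import "fmt"

    // Simplified stand-in for structs.Resources; networks omitted since
    // Superset itself ignores them.
    type Resources struct{ CPU, MemoryMB, DiskMB, IOPS int }

    func (r *Resources) Superset(other *Resources) (bool, string) {
    	switch {
    	case r.CPU < other.CPU:
    		return false, "cpu exhausted"
    	case r.MemoryMB < other.MemoryMB:
    		return false, "memory exhausted"
    	case r.DiskMB < other.DiskMB:
    		return false, "disk exhausted"
    	case r.IOPS < other.IOPS:
    		return false, "iops exhausted"
    	}
    	return true, ""
    }

    func (r *Resources) Add(delta *Resources) {
    	r.CPU += delta.CPU
    	r.MemoryMB += delta.MemoryMB
    	r.DiskMB += delta.DiskMB
    	r.IOPS += delta.IOPS
    }

    func main() {
    	node := &Resources{CPU: 500, MemoryMB: 1024, DiskMB: 2048}
    	used := &Resources{}
    	ask := &Resources{CPU: 250, MemoryMB: 512}

    	used.Add(ask) // tentatively place the allocation
    	if ok, dim := node.Superset(used); !ok {
    		fmt.Println("placement failed:", dim)
    		return
    	}
    	fmt.Printf("placed; used is now %+v\n", *used)
    }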
-func (n *NetworkResource) MeetsMinResources() error { - var mErr multierror.Error - if n.MBits < 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits)) - } - return mErr.ErrorOrNil() -} - -// Copy returns a deep copy of the network resource -func (n *NetworkResource) Copy() *NetworkResource { - if n == nil { - return nil - } - newR := new(NetworkResource) - *newR = *n - if n.ReservedPorts != nil { - newR.ReservedPorts = make([]Port, len(n.ReservedPorts)) - copy(newR.ReservedPorts, n.ReservedPorts) - } - if n.DynamicPorts != nil { - newR.DynamicPorts = make([]Port, len(n.DynamicPorts)) - copy(newR.DynamicPorts, n.DynamicPorts) - } - return newR -} - -// Add adds the resources of the delta to this, potentially -// returning an error if not possible. -func (n *NetworkResource) Add(delta *NetworkResource) { - if len(delta.ReservedPorts) > 0 { - n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...) - } - n.MBits += delta.MBits - n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...) -} - -func (n *NetworkResource) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -// PortLabels returns a map of port labels to their assigned host ports. -func (n *NetworkResource) PortLabels() map[string]int { - num := len(n.ReservedPorts) + len(n.DynamicPorts) - labelValues := make(map[string]int, num) - for _, port := range n.ReservedPorts { - labelValues[port.Label] = port.Value - } - for _, port := range n.DynamicPorts { - labelValues[port.Label] = port.Value - } - return labelValues -} - -const ( - // JobTypeNomad is reserved for internal system tasks and is - // always handled by the CoreScheduler. - JobTypeCore = "_core" - JobTypeService = "service" - JobTypeBatch = "batch" - JobTypeSystem = "system" -) - -const ( - JobStatusPending = "pending" // Pending means the job is waiting on scheduling - JobStatusRunning = "running" // Running means the job has non-terminal allocations - JobStatusDead = "dead" // Dead means all evaluation's and allocations are terminal -) - -const ( - // JobMinPriority is the minimum allowed priority - JobMinPriority = 1 - - // JobDefaultPriority is the default priority if not - // not specified. - JobDefaultPriority = 50 - - // JobMaxPriority is the maximum allowed priority - JobMaxPriority = 100 - - // Ensure CoreJobPriority is higher than any user - // specified job so that it gets priority. This is important - // for the system to remain healthy. - CoreJobPriority = JobMaxPriority * 2 - - // JobTrackedVersions is the number of historic job versions that are - // kept. - JobTrackedVersions = 6 -) - -// Job is the scope of a scheduling request to Nomad. It is the largest -// scoped object, and is a named collection of task groups. Each task group -// is further composed of tasks. A task group (TG) is the unit of scheduling -// however. -type Job struct { - // Stop marks whether the user has stopped the job. A stopped job will - // have all created allocations stopped and acts as a way to stop a job - // without purging it from the system. This allows existing allocs to be - // queried and the job to be inspected as it is being killed. - Stop bool - - // Region is the Nomad region that handles scheduling this job - Region string - - // Namespace is the namespace the job is submitted into. - Namespace string - - // ID is a unique identifier for the job per region. 
It can be
-	// specified hierarchically like LineOfBiz/OrgName/Team/Project
-	ID string
-
-	// ParentID is the unique identifier of the job that spawned this job.
-	ParentID string
-
-	// Name is the logical name of the job used to refer to it. This is unique
-	// per region, but not unique globally.
-	Name string
-
-	// Type is used to control various behaviors about the job. Most jobs
-	// are service jobs, meaning they are expected to be long lived.
-	// Some jobs are batch oriented, meaning they run and then terminate.
-	// This can be extended in the future to support custom schedulers.
-	Type string
-
-	// Priority is used to control scheduling importance and if this job
-	// can preempt other jobs.
-	Priority int
-
-	// AllAtOnce is used to control if incremental scheduling of task groups
-	// is allowed or if we must do a gang scheduling of the entire job. This
-	// can slow down larger jobs if resources are not available.
-	AllAtOnce bool
-
-	// Datacenters contains all the datacenters this job is allowed to span
-	Datacenters []string
-
-	// Constraints can be specified at a job level and apply to
-	// all the task groups and tasks.
-	Constraints []*Constraint
-
-	// TaskGroups are the collections of task groups that this job needs
-	// to run. Each task group is an atomic unit of scheduling and placement.
-	TaskGroups []*TaskGroup
-
-	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
-	Update UpdateStrategy
-
-	// Periodic is used to define the interval the job is run at.
-	Periodic *PeriodicConfig
-
-	// ParameterizedJob is used to specify the job as a parameterized job
-	// for dispatching.
-	ParameterizedJob *ParameterizedJobConfig
-
-	// Payload is the payload supplied when the job was dispatched.
-	Payload []byte
-
-	// Meta is used to associate arbitrary metadata with this
-	// job. This is opaque to Nomad.
-	Meta map[string]string
-
-	// VaultToken is the Vault token that proves the submitter of the job has
-	// access to the specified Vault policies. This field is only used to
-	// transfer the token and is not stored after Job submission.
-	VaultToken string
-
-	// Job status
-	Status string
-
-	// StatusDescription is meant to provide more human-readable information
-	StatusDescription string
-
-	// Stable marks a job as stable. Stability is only defined on "service" and
-	// "system" jobs. The stability of a job will be set automatically as part
-	// of a deployment and can be manually set via APIs.
-	Stable bool
-
-	// Version is a monotonically increasing version number that is incremented
-	// on each job register.
-	Version uint64
-
-	// SubmitTime is the time at which the job was submitted as a UnixNano in
-	// UTC
-	SubmitTime int64
-
-	// Raft Indexes
-	CreateIndex uint64
-	ModifyIndex uint64
-	JobModifyIndex uint64
-}
-
-// Canonicalize is used to canonicalize fields in the Job. This should be called
-// when registering a Job. A set of warnings is returned if the job was changed
-// in any way that the user should be made aware of.
-func (j *Job) Canonicalize() (warnings error) {
-	if j == nil {
-		return nil
-	}
-
-	var mErr multierror.Error
-	// Ensure that an empty and a nil map are treated the same to avoid scheduling
-	// problems since we use reflect DeepEquals.
-	if len(j.Meta) == 0 {
-		j.Meta = nil
-	}
-
-	// Ensure the job is in a namespace.
- if j.Namespace == "" { - j.Namespace = DefaultNamespace - } - - for _, tg := range j.TaskGroups { - tg.Canonicalize(j) - } - - if j.ParameterizedJob != nil { - j.ParameterizedJob.Canonicalize() - } - - if j.Periodic != nil { - j.Periodic.Canonicalize() - } - - // COMPAT: Remove in 0.7.0 - // Rewrite any job that has an update block with pre 0.6.0 syntax. - jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0 - if jobHasOldUpdate && j.Type != JobTypeBatch { - // Build an appropriate update block and copy it down to each task group - base := DefaultUpdateStrategy.Copy() - base.MaxParallel = j.Update.MaxParallel - base.MinHealthyTime = j.Update.Stagger - - // Add to each task group, modifying as needed - upgraded := false - l := len(j.TaskGroups) - for _, tg := range j.TaskGroups { - // The task group doesn't need upgrading if it has an update block with the new syntax - u := tg.Update - if u != nil && u.Stagger > 0 && u.MaxParallel > 0 && - u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 { - continue - } - - upgraded = true - - // The MaxParallel for the job should be 10% of the total count - // unless there is just one task group then we can infer the old - // max parallel should be the new - tgu := base.Copy() - if l != 1 { - // RoundTo 10% - var percent float64 = float64(tg.Count) * 0.1 - tgu.MaxParallel = int(percent + 0.5) - } - - // Safety guards - if tgu.MaxParallel == 0 { - tgu.MaxParallel = 1 - } else if tgu.MaxParallel > tg.Count { - tgu.MaxParallel = tg.Count - } - - tg.Update = tgu - } - - if upgraded { - w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " + - "Please update upgrade stanza before v0.7.0." - multierror.Append(&mErr, fmt.Errorf(w)) - } - } - - // Ensure that the batch job doesn't have new style or old style update - // stanza. Unfortunately are scanning here because we have to deprecate over - // a release so we can't check in the task group since that may be new style - // but wouldn't capture the old style and we don't want to have duplicate - // warnings. - if j.Type == JobTypeBatch { - displayWarning := jobHasOldUpdate - j.Update.Stagger = 0 - j.Update.MaxParallel = 0 - j.Update.HealthCheck = "" - j.Update.MinHealthyTime = 0 - j.Update.HealthyDeadline = 0 - j.Update.AutoRevert = false - j.Update.Canary = 0 - - // Remove any update spec from the task groups - for _, tg := range j.TaskGroups { - if tg.Update != nil { - displayWarning = true - tg.Update = nil - } - } - - if displayWarning { - w := "Update stanza is disallowed for batch jobs since v0.6.0. " + - "The update block has automatically been removed" - multierror.Append(&mErr, fmt.Errorf(w)) - } - } - - return mErr.ErrorOrNil() -} - -// Copy returns a deep copy of the Job. It is expected that callers use recover. -// This job can panic if the deep copy failed as it uses reflection. 
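The 10% arithmetic in the old-update-stanza conversion above is easy to misread, so here is the derivation in isolation: round count*0.1 to the nearest integer, then clamp into [1, count], with a single-group job inheriting the job-level MaxParallel. A self-contained sketch:

    package main

    import "fmt"

    // Mirrors the MaxParallel derivation in Job.Canonicalize above.
    func derivedMaxParallel(groupCount, groupsInJob, jobMaxParallel int) int {
    	mp := jobMaxParallel
    	if groupsInJob != 1 {
    		mp = int(float64(groupCount)*0.1 + 0.5) // round 10% to nearest
    	}
    	if mp == 0 {
    		mp = 1 // never stall the deployment entirely
    	} else if mp > groupCount {
    		mp = groupCount // never exceed the group size
    	}
    	return mp
    }

    func main() {
    	fmt.Println(derivedMaxParallel(4, 3, 2))  // 0.4+0.5 truncates to 0, clamped to 1
    	fmt.Println(derivedMaxParallel(25, 3, 2)) // 2.5+0.5 -> 3
    	fmt.Println(derivedMaxParallel(25, 1, 2)) // single group: job value, 2
    }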
-func (j *Job) Copy() *Job { - if j == nil { - return nil - } - nj := new(Job) - *nj = *j - nj.Datacenters = helper.CopySliceString(nj.Datacenters) - nj.Constraints = CopySliceConstraints(nj.Constraints) - - if j.TaskGroups != nil { - tgs := make([]*TaskGroup, len(nj.TaskGroups)) - for i, tg := range nj.TaskGroups { - tgs[i] = tg.Copy() - } - nj.TaskGroups = tgs - } - - nj.Periodic = nj.Periodic.Copy() - nj.Meta = helper.CopyMapStringString(nj.Meta) - nj.ParameterizedJob = nj.ParameterizedJob.Copy() - return nj -} - -// Validate is used to sanity check a job input -func (j *Job) Validate() error { - var mErr multierror.Error - - if j.Region == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) - } - if j.ID == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing job ID")) - } else if strings.Contains(j.ID, " ") { - mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space")) - } - if j.Name == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing job name")) - } - if j.Namespace == "" { - mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace")) - } - switch j.Type { - case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem: - case "": - mErr.Errors = append(mErr.Errors, errors.New("Missing job type")) - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type)) - } - if j.Priority < JobMinPriority || j.Priority > JobMaxPriority { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority)) - } - if len(j.Datacenters) == 0 { - mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters")) - } - if len(j.TaskGroups) == 0 { - mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups")) - } - for idx, constr := range j.Constraints { - if err := constr.Validate(); err != nil { - outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - // Check for duplicate task groups - taskGroups := make(map[string]int) - for idx, tg := range j.TaskGroups { - if tg.Name == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1)) - } else if existing, ok := taskGroups[tg.Name]; ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1)) - } else { - taskGroups[tg.Name] = idx - } - - if j.Type == "system" && tg.Count > 1 { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler", - tg.Name, tg.Count)) - } - } - - // Validate the task group - for _, tg := range j.TaskGroups { - if err := tg.Validate(j); err != nil { - outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - // Validate periodic is only used with batch jobs. 
- if j.IsPeriodic() && j.Periodic.Enabled { - if j.Type != JobTypeBatch { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch)) - } - - if err := j.Periodic.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } - - if j.IsParameterized() { - if j.Type != JobTypeBatch { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch)) - } - - if err := j.ParameterizedJob.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } - - return mErr.ErrorOrNil() -} - -// Warnings returns a list of warnings that may be from dubious settings or -// deprecation warnings. -func (j *Job) Warnings() error { - var mErr multierror.Error - - // Check the groups - for _, tg := range j.TaskGroups { - if err := tg.Warnings(j); err != nil { - outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - return mErr.ErrorOrNil() -} - -// LookupTaskGroup finds a task group by name -func (j *Job) LookupTaskGroup(name string) *TaskGroup { - for _, tg := range j.TaskGroups { - if tg.Name == name { - return tg - } - } - return nil -} - -// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined -// meta data for the task. When joining Job, Group and Task Meta, the precedence -// is by deepest scope (Task > Group > Job). -func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string { - group := j.LookupTaskGroup(groupName) - if group == nil { - return nil - } - - task := group.LookupTask(taskName) - if task == nil { - return nil - } - - meta := helper.CopyMapStringString(task.Meta) - if meta == nil { - meta = make(map[string]string, len(group.Meta)+len(j.Meta)) - } - - // Add the group specific meta - for k, v := range group.Meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - - // Add the job specific meta - for k, v := range j.Meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - - return meta -} - -// Stopped returns if a job is stopped. -func (j *Job) Stopped() bool { - return j == nil || j.Stop -} - -// HasUpdateStrategy returns if any task group in the job has an update strategy -func (j *Job) HasUpdateStrategy() bool { - for _, tg := range j.TaskGroups { - if tg.Update != nil { - return true - } - } - - return false -} - -// Stub is used to return a summary of the job -func (j *Job) Stub(summary *JobSummary) *JobListStub { - return &JobListStub{ - ID: j.ID, - ParentID: j.ParentID, - Name: j.Name, - Type: j.Type, - Priority: j.Priority, - Periodic: j.IsPeriodic(), - ParameterizedJob: j.IsParameterized(), - Stop: j.Stop, - Status: j.Status, - StatusDescription: j.StatusDescription, - CreateIndex: j.CreateIndex, - ModifyIndex: j.ModifyIndex, - JobModifyIndex: j.JobModifyIndex, - SubmitTime: j.SubmitTime, - JobSummary: summary, - } -} - -// IsPeriodic returns whether a job is periodic. -func (j *Job) IsPeriodic() bool { - return j.Periodic != nil -} - -// IsParameterized returns whether a job is parameterized job. 
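The precedence rule in CombinedTaskMeta above (Task > Group > Job) in a self-contained sketch, using plain maps as stand-ins for the three Meta fields:

    package main

    import "fmt"

    func combined(job, group, task map[string]string) map[string]string {
    	meta := make(map[string]string, len(job)+len(group)+len(task))
    	for k, v := range task { // deepest scope first; never overwritten
    		meta[k] = v
    	}
    	for _, scope := range []map[string]string{group, job} {
    		for k, v := range scope {
    			if _, ok := meta[k]; !ok {
    				meta[k] = v
    			}
    		}
    	}
    	return meta
    }

    func main() {
    	job := map[string]string{"env": "prod", "team": "infra"}
    	group := map[string]string{"env": "staging", "tier": "web"}
    	task := map[string]string{"env": "dev"}
    	fmt.Println(combined(job, group, task))
    	// map[env:dev team:infra tier:web] — the task-level value wins for "env"
    }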
-func (j *Job) IsParameterized() bool { - return j.ParameterizedJob != nil -} - -// VaultPolicies returns the set of Vault policies per task group, per task -func (j *Job) VaultPolicies() map[string]map[string]*Vault { - policies := make(map[string]map[string]*Vault, len(j.TaskGroups)) - - for _, tg := range j.TaskGroups { - tgPolicies := make(map[string]*Vault, len(tg.Tasks)) - - for _, task := range tg.Tasks { - if task.Vault == nil { - continue - } - - tgPolicies[task.Name] = task.Vault - } - - if len(tgPolicies) != 0 { - policies[tg.Name] = tgPolicies - } - } - - return policies -} - -// RequiredSignals returns a mapping of task groups to tasks to their required -// set of signals -func (j *Job) RequiredSignals() map[string]map[string][]string { - signals := make(map[string]map[string][]string) - - for _, tg := range j.TaskGroups { - for _, task := range tg.Tasks { - // Use this local one as a set - taskSignals := make(map[string]struct{}) - - // Check if the Vault change mode uses signals - if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal { - taskSignals[task.Vault.ChangeSignal] = struct{}{} - } - - // Check if any template change mode uses signals - for _, t := range task.Templates { - if t.ChangeMode != TemplateChangeModeSignal { - continue - } - - taskSignals[t.ChangeSignal] = struct{}{} - } - - // Flatten and sort the signals - l := len(taskSignals) - if l == 0 { - continue - } - - flat := make([]string, 0, l) - for sig := range taskSignals { - flat = append(flat, sig) - } - - sort.Strings(flat) - tgSignals, ok := signals[tg.Name] - if !ok { - tgSignals = make(map[string][]string) - signals[tg.Name] = tgSignals - } - tgSignals[task.Name] = flat - } - - } - - return signals -} - -// SpecChanged determines if the functional specification has changed between -// two job versions. -func (j *Job) SpecChanged(new *Job) bool { - if j == nil { - return new != nil - } - - // Create a copy of the new job - c := new.Copy() - - // Update the new job so we can do a reflect - c.Status = j.Status - c.StatusDescription = j.StatusDescription - c.Stable = j.Stable - c.Version = j.Version - c.CreateIndex = j.CreateIndex - c.ModifyIndex = j.ModifyIndex - c.JobModifyIndex = j.JobModifyIndex - c.SubmitTime = j.SubmitTime - - // Deep equals the jobs - return !reflect.DeepEqual(j, c) -} - -func (j *Job) SetSubmitTime() { - j.SubmitTime = time.Now().UTC().UnixNano() -} - -// JobListStub is used to return a subset of job information -// for the job list -type JobListStub struct { - ID string - ParentID string - Name string - Type string - Priority int - Periodic bool - ParameterizedJob bool - Stop bool - Status string - StatusDescription string - JobSummary *JobSummary - CreateIndex uint64 - ModifyIndex uint64 - JobModifyIndex uint64 - SubmitTime int64 -} - -// JobSummary summarizes the state of the allocations of a job -type JobSummary struct { - // JobID is the ID of the job the summary is for - JobID string - - // Namespace is the namespace of the job and its summary - Namespace string - - // Summmary contains the summary per task group for the Job - Summary map[string]TaskGroupSummary - - // Children contains a summary for the children of this job. 
-	Children *JobChildrenSummary
-
-	// Raft Indexes
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// Copy returns a new copy of JobSummary
-func (js *JobSummary) Copy() *JobSummary {
-	newJobSummary := new(JobSummary)
-	*newJobSummary = *js
-	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
-	for k, v := range js.Summary {
-		newTGSummary[k] = v
-	}
-	newJobSummary.Summary = newTGSummary
-	newJobSummary.Children = newJobSummary.Children.Copy()
-	return newJobSummary
-}
-
-// JobChildrenSummary contains the summary of children job statuses
-type JobChildrenSummary struct {
-	Pending int64
-	Running int64
-	Dead int64
-}
-
-// Copy returns a new copy of a JobChildrenSummary
-func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
-	if jc == nil {
-		return nil
-	}
-
-	njc := new(JobChildrenSummary)
-	*njc = *jc
-	return njc
-}
-
-// TaskGroupSummary summarizes the state of all the allocations of a particular
-// TaskGroup
-type TaskGroupSummary struct {
-	Queued int
-	Complete int
-	Failed int
-	Running int
-	Starting int
-	Lost int
-}
-
-const (
-	// Checks uses any registered health check state in combination with task
-	// states to determine if an allocation is healthy.
-	UpdateStrategyHealthCheck_Checks = "checks"
-
-	// TaskStates uses the task states of an allocation to determine if the
-	// allocation is healthy.
-	UpdateStrategyHealthCheck_TaskStates = "task_states"
-
-	// Manual allows the operator to manually signal to Nomad when an
-	// allocation is healthy. This allows more advanced health checking that is
-	// outside of the scope of Nomad.
-	UpdateStrategyHealthCheck_Manual = "manual"
-)
-
-var (
-	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
-	// jobs with the old policy or for populating field defaults.
-	DefaultUpdateStrategy = &UpdateStrategy{
-		Stagger: 30 * time.Second,
-		MaxParallel: 1,
-		HealthCheck: UpdateStrategyHealthCheck_Checks,
-		MinHealthyTime: 10 * time.Second,
-		HealthyDeadline: 5 * time.Minute,
-		AutoRevert: false,
-		Canary: 0,
-	}
-)
-
-// UpdateStrategy is used to modify how updates are done
-type UpdateStrategy struct {
-	// Stagger is used to determine the rate at which allocations are migrated
-	// due to down or draining nodes.
-	Stagger time.Duration
-
-	// MaxParallel is how many updates can be done in parallel
-	MaxParallel int
-
-	// HealthCheck specifies the mechanism in which allocations are marked
-	// healthy or unhealthy as part of a deployment.
-	HealthCheck string
-
-	// MinHealthyTime is the minimum time an allocation must be in the healthy
-	// state before it is marked as healthy, unblocking more allocations to be
-	// rolled.
-	MinHealthyTime time.Duration
-
-	// HealthyDeadline is the time in which an allocation must be marked as
-	// healthy before it is automatically transitioned to unhealthy. This time
-	// period doesn't count against the MinHealthyTime.
-	HealthyDeadline time.Duration
-
-	// AutoRevert declares that if a deployment fails because of unhealthy
-	// allocations, there should be an attempt to auto-revert the job to a
-	// stable version.
-	AutoRevert bool
-
-	// Canary is the number of canaries to deploy when a change to the task
-	// group is detected.
- Canary int -} - -func (u *UpdateStrategy) Copy() *UpdateStrategy { - if u == nil { - return nil - } - - copy := new(UpdateStrategy) - *copy = *u - return copy -} - -func (u *UpdateStrategy) Validate() error { - if u == nil { - return nil - } - - var mErr multierror.Error - switch u.HealthCheck { - case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual: - default: - multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) - } - - if u.MaxParallel < 1 { - multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than one: %d < 1", u.MaxParallel)) - } - if u.Canary < 0 { - multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) - } - if u.MinHealthyTime < 0 { - multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) - } - if u.HealthyDeadline <= 0 { - multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) - } - if u.MinHealthyTime >= u.HealthyDeadline { - multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) - } - if u.Stagger <= 0 { - multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) - } - - return mErr.ErrorOrNil() -} - -// TODO(alexdadgar): Remove once no longer used by the scheduler. -// Rolling returns if a rolling strategy should be used -func (u *UpdateStrategy) Rolling() bool { - return u.Stagger > 0 && u.MaxParallel > 0 -} - -const ( - // PeriodicSpecCron is used for a cron spec. - PeriodicSpecCron = "cron" - - // PeriodicSpecTest is only used by unit tests. It is a sorted, comma - // separated list of unix timestamps at which to launch. - PeriodicSpecTest = "_internal_test" -) - -// Periodic defines the interval a job should be run at. -type PeriodicConfig struct { - // Enabled determines if the job should be run periodically. - Enabled bool - - // Spec specifies the interval the job should be run as. It is parsed based - // on the SpecType. - Spec string - - // SpecType defines the format of the spec. - SpecType string - - // ProhibitOverlap enforces that spawned jobs do not run in parallel. - ProhibitOverlap bool - - // TimeZone is the user specified string that determines the time zone to - // launch against. The time zones must be specified from IANA Time Zone - // database, such as "America/New_York". 
- // Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - // Reference: https://www.iana.org/time-zones - TimeZone string - - // location is the time zone to evaluate the launch time against - location *time.Location -} - -func (p *PeriodicConfig) Copy() *PeriodicConfig { - if p == nil { - return nil - } - np := new(PeriodicConfig) - *np = *p - return np -} - -func (p *PeriodicConfig) Validate() error { - if !p.Enabled { - return nil - } - - var mErr multierror.Error - if p.Spec == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a spec")) - } - - // Check if we got a valid time zone - if p.TimeZone != "" { - if _, err := time.LoadLocation(p.TimeZone); err != nil { - multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err)) - } - } - - switch p.SpecType { - case PeriodicSpecCron: - // Validate the cron spec - if _, err := cronexpr.Parse(p.Spec); err != nil { - multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err)) - } - case PeriodicSpecTest: - // No-op - default: - multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType)) - } - - return mErr.ErrorOrNil() -} - -func (p *PeriodicConfig) Canonicalize() { - // Load the location - l, err := time.LoadLocation(p.TimeZone) - if err != nil { - p.location = time.UTC - } - - p.location = l -} - -// Next returns the closest time instant matching the spec that is after the -// passed time. If no matching instance exists, the zero value of time.Time is -// returned. The `time.Location` of the returned value matches that of the -// passed time. -func (p *PeriodicConfig) Next(fromTime time.Time) time.Time { - switch p.SpecType { - case PeriodicSpecCron: - if e, err := cronexpr.Parse(p.Spec); err == nil { - return e.Next(fromTime) - } - case PeriodicSpecTest: - split := strings.Split(p.Spec, ",") - if len(split) == 1 && split[0] == "" { - return time.Time{} - } - - // Parse the times - times := make([]time.Time, len(split)) - for i, s := range split { - unix, err := strconv.Atoi(s) - if err != nil { - return time.Time{} - } - - times[i] = time.Unix(int64(unix), 0) - } - - // Find the next match - for _, next := range times { - if fromTime.Before(next) { - return next - } - } - } - - return time.Time{} -} - -// GetLocation returns the location to use for determining the time zone to run -// the periodic job against. -func (p *PeriodicConfig) GetLocation() *time.Location { - // Jobs pre 0.5.5 will not have this - if p.location != nil { - return p.location - } - - return time.UTC -} - -const ( - // PeriodicLaunchSuffix is the string appended to the periodic jobs ID - // when launching derived instances of it. - PeriodicLaunchSuffix = "/periodic-" -) - -// PeriodicLaunch tracks the last launch time of a periodic job. -type PeriodicLaunch struct { - ID string // ID of the periodic job. - Namespace string // Namespace of the periodic job - Launch time.Time // The last launch time. - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -const ( - DispatchPayloadForbidden = "forbidden" - DispatchPayloadOptional = "optional" - DispatchPayloadRequired = "required" - - // DispatchLaunchSuffix is the string appended to the parameterized job's ID - // when dispatching instances of it. 
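One behavioral wrinkle in the removed PeriodicConfig.Canonicalize above: on a LoadLocation error it assigns time.UTC, but then unconditionally overwrites p.location with the nil result, so the UTC fallback never sticks. A self-contained sketch of what the guard presumably intends, with an early return added:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Simplified stand-in for structs.PeriodicConfig.
    type PeriodicConfig struct {
    	TimeZone string
    	location *time.Location
    }

    func (p *PeriodicConfig) Canonicalize() {
    	l, err := time.LoadLocation(p.TimeZone)
    	if err != nil {
    		p.location = time.UTC
    		return // keep the UTC fallback instead of overwriting it with nil
    	}
    	p.location = l
    }

    func main() {
    	p := &PeriodicConfig{TimeZone: "not/a/zone"}
    	p.Canonicalize()
    	fmt.Println(p.location) // UTC, not nil
    }

Note that time.LoadLocation("") returns UTC without an error, so jobs with no TimeZone set still take the normal path.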
- DispatchLaunchSuffix = "/dispatch-" -) - -// ParameterizedJobConfig is used to configure the parameterized job -type ParameterizedJobConfig struct { - // Payload configure the payload requirements - Payload string - - // MetaRequired is metadata keys that must be specified by the dispatcher - MetaRequired []string - - // MetaOptional is metadata keys that may be specified by the dispatcher - MetaOptional []string -} - -func (d *ParameterizedJobConfig) Validate() error { - var mErr multierror.Error - switch d.Payload { - case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden: - default: - multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) - } - - // Check that the meta configurations are disjoint sets - disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional) - if !disjoint { - multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending)) - } - - return mErr.ErrorOrNil() -} - -func (d *ParameterizedJobConfig) Canonicalize() { - if d.Payload == "" { - d.Payload = DispatchPayloadOptional - } -} - -func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig { - if d == nil { - return nil - } - nd := new(ParameterizedJobConfig) - *nd = *d - nd.MetaOptional = helper.CopySliceString(nd.MetaOptional) - nd.MetaRequired = helper.CopySliceString(nd.MetaRequired) - return nd -} - -// DispatchedID returns an ID appropriate for a job dispatched against a -// particular parameterized job -func DispatchedID(templateID string, t time.Time) string { - u := GenerateUUID()[:8] - return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u) -} - -// DispatchPayloadConfig configures how a task gets its input from a job dispatch -type DispatchPayloadConfig struct { - // File specifies a relative path to where the input data should be written - File string -} - -func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig { - if d == nil { - return nil - } - nd := new(DispatchPayloadConfig) - *nd = *d - return nd -} - -func (d *DispatchPayloadConfig) Validate() error { - // Verify the destination doesn't escape - escaped, err := PathEscapesAllocDir("task/local/", d.File) - if err != nil { - return fmt.Errorf("invalid destination path: %v", err) - } else if escaped { - return fmt.Errorf("destination escapes allocation directory") - } - - return nil -} - -var ( - defaultServiceJobRestartPolicy = RestartPolicy{ - Delay: 15 * time.Second, - Attempts: 2, - Interval: 1 * time.Minute, - Mode: RestartPolicyModeDelay, - } - defaultBatchJobRestartPolicy = RestartPolicy{ - Delay: 15 * time.Second, - Attempts: 15, - Interval: 7 * 24 * time.Hour, - Mode: RestartPolicyModeDelay, - } -) - -const ( - // RestartPolicyModeDelay causes an artificial delay till the next interval is - // reached when the specified attempts have been reached in the interval. - RestartPolicyModeDelay = "delay" - - // RestartPolicyModeFail causes a job to fail if the specified number of - // attempts are reached within an interval. - RestartPolicyModeFail = "fail" - - // RestartPolicyMinInterval is the minimum interval that is accepted for a - // restart policy. - RestartPolicyMinInterval = 5 * time.Second -) - -// RestartPolicy configures how Tasks are restarted when they crash or fail. -type RestartPolicy struct { - // Attempts is the number of restart that will occur in an interval. 
- Attempts int - - // Interval is a duration in which we can limit the number of restarts - // within. - Interval time.Duration - - // Delay is the time between a failure and a restart. - Delay time.Duration - - // Mode controls what happens when the task restarts more than attempt times - // in an interval. - Mode string -} - -func (r *RestartPolicy) Copy() *RestartPolicy { - if r == nil { - return nil - } - nrp := new(RestartPolicy) - *nrp = *r - return nrp -} - -func (r *RestartPolicy) Validate() error { - var mErr multierror.Error - switch r.Mode { - case RestartPolicyModeDelay, RestartPolicyModeFail: - default: - multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode)) - } - - // Check for ambiguous/confusing settings - if r.Attempts == 0 && r.Mode != RestartPolicyModeFail { - multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts)) - } - - if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() { - multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval)) - } - if time.Duration(r.Attempts)*r.Delay > r.Interval { - multierror.Append(&mErr, - fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay)) - } - return mErr.ErrorOrNil() -} - -func NewRestartPolicy(jobType string) *RestartPolicy { - switch jobType { - case JobTypeService, JobTypeSystem: - rp := defaultServiceJobRestartPolicy - return &rp - case JobTypeBatch: - rp := defaultBatchJobRestartPolicy - return &rp - } - return nil -} - -// TaskGroup is an atomic unit of placement. Each task group belongs to -// a job and may contain any number of tasks. A task group support running -// in many replicas using the same configuration.. -type TaskGroup struct { - // Name of the task group - Name string - - // Count is the number of replicas of this task group that should - // be scheduled. - Count int - - // Update is used to control the update strategy for this task group - Update *UpdateStrategy - - // Constraints can be specified at a task group level and apply to - // all the tasks contained. - Constraints []*Constraint - - //RestartPolicy of a TaskGroup - RestartPolicy *RestartPolicy - - // Tasks are the collection of tasks that this task group needs to run - Tasks []*Task - - // EphemeralDisk is the disk resources that the task group requests - EphemeralDisk *EphemeralDisk - - // Meta is used to associate arbitrary metadata with this - // task group. This is opaque to Nomad. - Meta map[string]string -} - -func (tg *TaskGroup) Copy() *TaskGroup { - if tg == nil { - return nil - } - ntg := new(TaskGroup) - *ntg = *tg - ntg.Update = ntg.Update.Copy() - ntg.Constraints = CopySliceConstraints(ntg.Constraints) - ntg.RestartPolicy = ntg.RestartPolicy.Copy() - - if tg.Tasks != nil { - tasks := make([]*Task, len(ntg.Tasks)) - for i, t := range ntg.Tasks { - tasks[i] = t.Copy() - } - ntg.Tasks = tasks - } - - ntg.Meta = helper.CopyMapStringString(ntg.Meta) - - if tg.EphemeralDisk != nil { - ntg.EphemeralDisk = tg.EphemeralDisk.Copy() - } - return ntg -} - -// Canonicalize is used to canonicalize fields in the TaskGroup. -func (tg *TaskGroup) Canonicalize(job *Job) { - // Ensure that an empty and nil map are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(tg.Meta) == 0 { - tg.Meta = nil - } - - // Set the default restart policy. 
- if tg.RestartPolicy == nil { - tg.RestartPolicy = NewRestartPolicy(job.Type) - } - - // Set a default ephemeral disk object if the user has not requested for one - if tg.EphemeralDisk == nil { - tg.EphemeralDisk = DefaultEphemeralDisk() - } - - for _, task := range tg.Tasks { - task.Canonicalize(job, tg) - } - - // Add up the disk resources to EphemeralDisk. This is done so that users - // are not required to move their disk attribute from resources to - // EphemeralDisk section of the job spec in Nomad 0.5 - // COMPAT 0.4.1 -> 0.5 - // Remove in 0.6 - var diskMB int - for _, task := range tg.Tasks { - diskMB += task.Resources.DiskMB - } - if diskMB > 0 { - tg.EphemeralDisk.SizeMB = diskMB - } -} - -// Validate is used to sanity check a task group -func (tg *TaskGroup) Validate(j *Job) error { - var mErr multierror.Error - if tg.Name == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing task group name")) - } - if tg.Count < 0 { - mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative")) - } - if len(tg.Tasks) == 0 { - mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group")) - } - for idx, constr := range tg.Constraints { - if err := constr.Validate(); err != nil { - outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - if tg.RestartPolicy != nil { - if err := tg.RestartPolicy.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } else { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) - } - - if tg.EphemeralDisk != nil { - if err := tg.EphemeralDisk.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } else { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) - } - - // Validate the update strategy - if u := tg.Update; u != nil { - switch j.Type { - case JobTypeService, JobTypeSystem: - default: - // COMPAT: Enable in 0.7.0 - //mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type)) - } - if err := u.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } - - // Check for duplicate tasks, that there is only leader task if any, - // and no duplicated static ports - tasks := make(map[string]int) - staticPorts := make(map[int]string) - leaderTasks := 0 - for idx, task := range tg.Tasks { - if task.Name == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1)) - } else if existing, ok := tasks[task.Name]; ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) - } else { - tasks[task.Name] = idx - } - - if task.Leader { - leaderTasks++ - } - - if task.Resources == nil { - continue - } - - for _, net := range task.Resources.Networks { - for _, port := range net.ReservedPorts { - if other, ok := staticPorts[port.Value]; ok { - err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other) - mErr.Errors = append(mErr.Errors, err) - } else { - staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label) - } - } - } - } - - if leaderTasks > 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader")) - } - - // Validate the tasks - for _, task := range tg.Tasks { - if err := task.Validate(tg.EphemeralDisk); err != nil { - outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) - mErr.Errors = 
append(mErr.Errors, outer) - } - } - return mErr.ErrorOrNil() -} - -// Warnings returns a list of warnings that may be from dubious settings or -// deprecation warnings. -func (tg *TaskGroup) Warnings(j *Job) error { - var mErr multierror.Error - - // Validate the update strategy - if u := tg.Update; u != nil { - // Check the counts are appropriate - if u.MaxParallel > tg.Count { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+ - "A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count)) - } - } - - return mErr.ErrorOrNil() -} - -// LookupTask finds a task by name -func (tg *TaskGroup) LookupTask(name string) *Task { - for _, t := range tg.Tasks { - if t.Name == name { - return t - } - } - return nil -} - -func (tg *TaskGroup) GoString() string { - return fmt.Sprintf("*%#v", *tg) -} - -// CheckRestart describes if and when a task should be restarted based on -// failing health checks. -type CheckRestart struct { - Limit int // Restart task after this many unhealthy intervals - Grace time.Duration // Grace time to give tasks after starting to get healthy - IgnoreWarnings bool // If true treat checks in `warning` as passing -} - -func (c *CheckRestart) Copy() *CheckRestart { - if c == nil { - return nil - } - - nc := new(CheckRestart) - *nc = *c - return nc -} - -func (c *CheckRestart) Validate() error { - if c == nil { - return nil - } - - var mErr multierror.Error - if c.Limit < 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit)) - } - - if c.Grace < 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace)) - } - - return mErr.ErrorOrNil() -} - -const ( - ServiceCheckHTTP = "http" - ServiceCheckTCP = "tcp" - ServiceCheckScript = "script" - - // minCheckInterval is the minimum check interval permitted. Consul - // currently has its MinInterval set to 1s. Mirror that here for - // consistency. - minCheckInterval = 1 * time.Second - - // minCheckTimeout is the minimum check timeout permitted for Consul - // script TTL checks. 
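As a rough mental model for the CheckRestart fields above (illustrative arithmetic only; the real bookkeeping lives in the Nomad client): a task becomes eligible for restart only after its grace period plus Limit consecutive unhealthy check intervals. A sketch, assuming a fixed check interval:

    package main

    import (
    	"fmt"
    	"time"
    )

    type CheckRestart struct {
    	Limit int
    	Grace time.Duration
    }

    // worstCaseRestartDelay is a hypothetical helper: the longest a task can
    // sit unhealthy before a CheckRestart-triggered restart fires.
    func worstCaseRestartDelay(cr CheckRestart, checkInterval time.Duration) time.Duration {
    	if cr.Limit <= 0 {
    		return 0 // check-based restarts disabled
    	}
    	return cr.Grace + time.Duration(cr.Limit)*checkInterval
    }

    func main() {
    	cr := CheckRestart{Limit: 3, Grace: 10 * time.Second}
    	fmt.Println(worstCaseRestartDelay(cr, 5*time.Second)) // 25s
    }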
-
-const (
-	ServiceCheckHTTP   = "http"
-	ServiceCheckTCP    = "tcp"
-	ServiceCheckScript = "script"
-
-	// minCheckInterval is the minimum check interval permitted. Consul
-	// currently has its MinInterval set to 1s. Mirror that here for
-	// consistency.
-	minCheckInterval = 1 * time.Second
-
-	// minCheckTimeout is the minimum check timeout permitted for Consul
-	// script TTL checks.
-	minCheckTimeout = 1 * time.Second
-)
-
-// The ServiceCheck data model represents the consul health check that
-// Nomad registers for a Task
-type ServiceCheck struct {
-	Name          string              // Name of the check, defaults to id
-	Type          string              // Type of the check - tcp, http, docker and script
-	Command       string              // Command is the command to run for script checks
-	Args          []string            // Args is a list of arguments for script checks
-	Path          string              // path of the health check url for http type check
-	Protocol      string              // Protocol to use if check is http, defaults to http
-	PortLabel     string              // The port to use for tcp/http checks
-	Interval      time.Duration       // Interval of the check
-	Timeout       time.Duration       // Timeout of the response from the check before consul fails the check
-	InitialStatus string              // Initial status of the check
-	TLSSkipVerify bool                // Skip TLS verification when Protocol=https
-	Method        string              // HTTP Method to use (GET by default)
-	Header        map[string][]string // HTTP Headers for Consul to set when making HTTP checks
-	CheckRestart  *CheckRestart       // If and when a task should be restarted based on checks
-}
-
-func (sc *ServiceCheck) Copy() *ServiceCheck {
-	if sc == nil {
-		return nil
-	}
-	nsc := new(ServiceCheck)
-	*nsc = *sc
-	nsc.Args = helper.CopySliceString(sc.Args)
-	nsc.Header = helper.CopyMapStringSliceString(sc.Header)
-	nsc.CheckRestart = sc.CheckRestart.Copy()
-	return nsc
-}
-
-func (sc *ServiceCheck) Canonicalize(serviceName string) {
-	// Ensure empty maps/slices are treated as null to avoid scheduling
-	// issues when using DeepEquals.
-	if len(sc.Args) == 0 {
-		sc.Args = nil
-	}
-
-	if len(sc.Header) == 0 {
-		sc.Header = nil
-	} else {
-		for k, v := range sc.Header {
-			if len(v) == 0 {
-				sc.Header[k] = nil
-			}
-		}
-	}
-
-	if sc.Name == "" {
-		sc.Name = fmt.Sprintf("service: %q check", serviceName)
-	}
-}
-
-// validate a Service's ServiceCheck
-func (sc *ServiceCheck) validate() error {
-	switch strings.ToLower(sc.Type) {
-	case ServiceCheckTCP:
-	case ServiceCheckHTTP:
-		if sc.Path == "" {
-			return fmt.Errorf("http type must have a valid http path")
-		}
-
-	case ServiceCheckScript:
-		if sc.Command == "" {
-			return fmt.Errorf("script type must have a valid script path")
-		}
-	default:
-		return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type)
-	}
-
-	if sc.Interval == 0 {
-		return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval)
-	} else if sc.Interval < minCheckInterval {
-		return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval)
-	}
-
-	if sc.Timeout == 0 {
-		return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
-	} else if sc.Timeout < minCheckTimeout {
-		return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
-	}
-
-	switch sc.InitialStatus {
-	case "":
-		// case api.HealthUnknown: TODO: Add when Consul releases 0.7.1
-	case api.HealthPassing:
-	case api.HealthWarning:
-	case api.HealthCritical:
-	default:
-		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
-
-	}
-
-	return sc.CheckRestart.Validate()
-}
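-
-// A minimal HTTP check that satisfies validate() above (the values are
-// illustrative only): the type and path are set, and both interval and
-// timeout are at least one second, e.g.
-//
-//	check := &ServiceCheck{
-//		Type:     ServiceCheckHTTP,
-//		Path:     "/health",
-//		Interval: 10 * time.Second,
-//		Timeout:  2 * time.Second,
-//	}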
-
-// RequiresPort returns whether the service check requires the task has a port.
-func (sc *ServiceCheck) RequiresPort() bool {
-	switch sc.Type {
-	case ServiceCheckHTTP, ServiceCheckTCP:
-		return true
-	default:
-		return false
-	}
-}
-
-// TriggersRestarts returns true if this check should be watched and trigger a restart
-// on failure.
-func (sc *ServiceCheck) TriggersRestarts() bool {
-	return sc.CheckRestart != nil && sc.CheckRestart.Limit > 0
-}
-
-// Hash all ServiceCheck fields and the check's corresponding service ID to
-// create an identifier. The identifier is not guaranteed to be unique: if
-// the PortLabel is blank, the Service's PortLabel will be used after Hash is
-// called.
-func (sc *ServiceCheck) Hash(serviceID string) string {
-	h := sha1.New()
-	io.WriteString(h, serviceID)
-	io.WriteString(h, sc.Name)
-	io.WriteString(h, sc.Type)
-	io.WriteString(h, sc.Command)
-	io.WriteString(h, strings.Join(sc.Args, ""))
-	io.WriteString(h, sc.Path)
-	io.WriteString(h, sc.Protocol)
-	io.WriteString(h, sc.PortLabel)
-	io.WriteString(h, sc.Interval.String())
-	io.WriteString(h, sc.Timeout.String())
-	io.WriteString(h, sc.Method)
-	// Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6
-	if sc.TLSSkipVerify {
-		io.WriteString(h, "true")
-	}
-
-	// Since map iteration order isn't stable we need to write k/v pairs to
-	// a slice and sort it before hashing.
-	if len(sc.Header) > 0 {
-		headers := make([]string, 0, len(sc.Header))
-		for k, v := range sc.Header {
-			headers = append(headers, k+strings.Join(v, ""))
-		}
-		sort.Strings(headers)
-		io.WriteString(h, strings.Join(headers, ""))
-	}
-
-	return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-const (
-	AddressModeAuto   = "auto"
-	AddressModeHost   = "host"
-	AddressModeDriver = "driver"
-)
-
-// Service represents a Consul service definition in Nomad
-type Service struct {
-	// Name of the service registered with Consul. Consul defaults the
-	// Name to ServiceID if not specified. The Name if specified is used
-	// as one of the seed values when generating a Consul ServiceID.
-	Name string
-
-	// PortLabel is either the numeric port number or the `host:port`.
-	// To specify the port number using the host's Consul Advertise
-	// address, specify an empty host in the PortLabel (e.g. `:port`).
-	PortLabel string
-
-	// AddressMode specifies whether or not to use the host ip:port for
-	// this service.
-	AddressMode string
-
-	Tags   []string        // List of tags for the service
-	Checks []*ServiceCheck // List of checks associated with the service
-}
-
-func (s *Service) Copy() *Service {
-	if s == nil {
-		return nil
-	}
-	ns := new(Service)
-	*ns = *s
-	ns.Tags = helper.CopySliceString(ns.Tags)
-
-	if s.Checks != nil {
-		checks := make([]*ServiceCheck, len(ns.Checks))
-		for i, c := range ns.Checks {
-			checks[i] = c.Copy()
-		}
-		ns.Checks = checks
-	}
-
-	return ns
-}
-
-// Canonicalize interpolates values of Job, Task Group and Task in the Service
-// Name. This also generates check names, service id and check ids.
-func (s *Service) Canonicalize(job string, taskGroup string, task string) {
-	// Ensure empty lists are treated as null to avoid scheduler issues when
-	// using DeepEquals
-	if len(s.Tags) == 0 {
-		s.Tags = nil
-	}
-	if len(s.Checks) == 0 {
-		s.Checks = nil
-	}
-
-	s.Name = args.ReplaceEnv(s.Name, map[string]string{
-		"JOB":       job,
-		"TASKGROUP": taskGroup,
-		"TASK":      task,
-		"BASE":      fmt.Sprintf("%s-%s-%s", job, taskGroup, task),
-	},
-	)
-
-	for _, check := range s.Checks {
-		check.Canonicalize(s.Name)
-	}
-}
-
-// Validate checks if the Check definition is valid
-func (s *Service) Validate() error {
-	var mErr multierror.Error
-
-	// Ensure the service name is valid per the below RFCs but make an exception
-	// for our interpolation syntax
-	// RFC-952 §1 (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
-	// (https://tools.ietf.org/html/rfc1123), and RFC-2782
-	// (https://tools.ietf.org/html/rfc2782).
-	re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9\$][a-zA-Z0-9\-\$\{\}\_\.]*[a-z0-9\}])$`)
-	if !re.MatchString(s.Name) {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name))
-	}
-
-	switch s.AddressMode {
-	case "", AddressModeAuto, AddressModeHost, AddressModeDriver:
-		// OK
-	default:
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode))
-	}
-
-	for _, c := range s.Checks {
-		if s.PortLabel == "" && c.RequiresPort() {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name))
-			continue
-		}
-
-		if err := c.validate(); err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err))
-		}
-	}
-
-	return mErr.ErrorOrNil()
-}
-
-// ValidateName checks if the service's Name is valid and should be called after
-// the name has been interpolated
-func (s *Service) ValidateName(name string) error {
-	// Ensure the service name is valid per RFC-952 §1
-	// (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
-	// (https://tools.ietf.org/html/rfc1123), and RFC-2782
-	// (https://tools.ietf.org/html/rfc2782).
-	re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`)
-	if !re.MatchString(name) {
-		return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name)
-	}
-	return nil
-}
-
-// Hash calculates the hash of the service based on its content
-func (s *Service) Hash() string {
-	h := sha1.New()
-	io.WriteString(h, s.Name)
-	io.WriteString(h, strings.Join(s.Tags, ""))
-	io.WriteString(h, s.PortLabel)
-	io.WriteString(h, s.AddressMode)
-	return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-const (
-	// DefaultKillTimeout is the default timeout between signaling a task it
-	// will be killed and killing it.
-	DefaultKillTimeout = 5 * time.Second
-)
-
-// LogConfig provides configuration for log rotation
-type LogConfig struct {
-	MaxFiles      int
-	MaxFileSizeMB int
-}
-
-// DefaultLogConfig returns the default LogConfig values.
-func DefaultLogConfig() *LogConfig {
-	return &LogConfig{
-		MaxFiles:      10,
-		MaxFileSizeMB: 10,
-	}
-}
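-
-// Worked example (illustrative): the defaults above allow 10 files of 10MB
-// each, a 100MB log budget per task. Task.Validate below requires that this
-// budget be strictly smaller than the task group's ephemeral disk, which
-// defaults to 300MB, so the defaults leave 200MB of headroom.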
-
-// Validate returns an error if the log config specified is less than
-// the minimum allowed.
-func (l *LogConfig) Validate() error {
-	var mErr multierror.Error
-	if l.MaxFiles < 1 {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
-	}
-	if l.MaxFileSizeMB < 1 {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
-	}
-	return mErr.ErrorOrNil()
-}
-
-// Task is a single process, typically one that is executed as part of a task group.
-type Task struct {
-	// Name of the task
-	Name string
-
-	// Driver is used to control which driver is used
-	Driver string
-
-	// User is used to determine which user will run the task. It defaults to
-	// the same user the Nomad client is being run as.
-	User string
-
-	// Config is provided to the driver to initialize
-	Config map[string]interface{}
-
-	// Map of environment variables to be used by the driver
-	Env map[string]string
-
-	// List of service definitions exposed by the Task
-	Services []*Service
-
-	// Vault is used to define the set of Vault policies that this task should
-	// have access to.
-	Vault *Vault
-
-	// Templates are the set of templates to be rendered for the task.
-	Templates []*Template
-
-	// Constraints can be specified at a task level and apply only to
-	// the particular task.
-	Constraints []*Constraint
-
-	// Resources is the resources needed by this task
-	Resources *Resources
-
-	// DispatchPayload configures how the task retrieves its input from a dispatch
-	DispatchPayload *DispatchPayloadConfig
-
-	// Meta is used to associate arbitrary metadata with this
-	// task. This is opaque to Nomad.
-	Meta map[string]string
-
-	// KillTimeout is the time between signaling a task that it will be
-	// killed and killing it.
-	KillTimeout time.Duration
-
-	// LogConfig provides configuration for log rotation
-	LogConfig *LogConfig
-
-	// Artifacts is a list of artifacts to download and extract before running
-	// the task.
-	Artifacts []*TaskArtifact
-
-	// Leader marks the task as the leader within the group. When the leader
-	// task exits, other tasks will be gracefully terminated.
-	Leader bool
-
-	// ShutdownDelay is the duration of the delay between deregistering a
-	// task from Consul and sending it a signal to shutdown. See #2441
-	ShutdownDelay time.Duration
-}
-
-func (t *Task) Copy() *Task {
-	if t == nil {
-		return nil
-	}
-	nt := new(Task)
-	*nt = *t
-	nt.Env = helper.CopyMapStringString(nt.Env)
-
-	if t.Services != nil {
-		services := make([]*Service, len(nt.Services))
-		for i, s := range nt.Services {
-			services[i] = s.Copy()
-		}
-		nt.Services = services
-	}
-
-	nt.Constraints = CopySliceConstraints(nt.Constraints)
-
-	nt.Vault = nt.Vault.Copy()
-	nt.Resources = nt.Resources.Copy()
-	nt.Meta = helper.CopyMapStringString(nt.Meta)
-	nt.DispatchPayload = nt.DispatchPayload.Copy()
-
-	if t.Artifacts != nil {
-		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
-		for _, a := range nt.Artifacts {
-			artifacts = append(artifacts, a.Copy())
-		}
-		nt.Artifacts = artifacts
-	}
-
-	if i, err := copystructure.Copy(nt.Config); err != nil {
-		panic(err.Error())
-	} else {
-		nt.Config = i.(map[string]interface{})
-	}
-
-	if t.Templates != nil {
-		templates := make([]*Template, len(t.Templates))
-		for i, tmpl := range nt.Templates {
-			templates[i] = tmpl.Copy()
-		}
-		nt.Templates = templates
-	}
-
-	return nt
-}
-
-// Canonicalize canonicalizes fields in the task.
-func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
-	// Ensure that an empty and nil map are treated the same to avoid scheduling
-	// problems since we use reflect DeepEquals.
-	if len(t.Meta) == 0 {
-		t.Meta = nil
-	}
-	if len(t.Config) == 0 {
-		t.Config = nil
-	}
-	if len(t.Env) == 0 {
-		t.Env = nil
-	}
-
-	for _, service := range t.Services {
-		service.Canonicalize(job.Name, tg.Name, t.Name)
-	}
-
-	// If Resources are nil initialize them to defaults, otherwise canonicalize
-	if t.Resources == nil {
-		t.Resources = DefaultResources()
-	} else {
-		t.Resources.Canonicalize()
-	}
-
-	// Set the default timeout if it is not specified.
-	if t.KillTimeout == 0 {
-		t.KillTimeout = DefaultKillTimeout
-	}
-
-	if t.Vault != nil {
-		t.Vault.Canonicalize()
-	}
-
-	for _, template := range t.Templates {
-		template.Canonicalize()
-	}
-}
-
-func (t *Task) GoString() string {
-	return fmt.Sprintf("*%#v", *t)
-}
-
-// Validate is used to sanity check a task
-func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
-	var mErr multierror.Error
-	if t.Name == "" {
-		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
-	}
-	if strings.ContainsAny(t.Name, `/\`) {
-		// We enforce this so that when creating the directory on disk it will
-		// not have any slashes.
-		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
-	}
-	if t.Driver == "" {
-		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
-	}
-	if t.KillTimeout < 0 {
-		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
-	}
-	if t.ShutdownDelay < 0 {
-		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
-	}
-
-	// Validate the resources.
-	if t.Resources == nil {
-		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
-	} else {
-		if err := t.Resources.MeetsMinResources(); err != nil {
-			mErr.Errors = append(mErr.Errors, err)
-		}
-
-		// Ensure the task isn't asking for disk resources
-		if t.Resources.DiskMB > 0 {
-			mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
-		}
-	}
-
-	// Validate the log config
-	if t.LogConfig == nil {
-		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
-	} else if err := t.LogConfig.Validate(); err != nil {
-		mErr.Errors = append(mErr.Errors, err)
-	}
-
-	for idx, constr := range t.Constraints {
-		if err := constr.Validate(); err != nil {
-			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
-			mErr.Errors = append(mErr.Errors, outer)
-		}
-
-		switch constr.Operand {
-		case ConstraintDistinctHosts, ConstraintDistinctProperty:
-			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
-			mErr.Errors = append(mErr.Errors, outer)
-		}
-	}
-
-	// Validate Services
-	if err := validateServices(t); err != nil {
-		mErr.Errors = append(mErr.Errors, err)
-	}
-
-	if t.LogConfig != nil && ephemeralDisk != nil {
-		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
-		if ephemeralDisk.SizeMB <= logUsage {
-			mErr.Errors = append(mErr.Errors,
-				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
-					logUsage, ephemeralDisk.SizeMB))
-		}
-	}
-
-	for idx, artifact := range t.Artifacts {
-		if err := artifact.Validate(); err != nil {
-			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
-			mErr.Errors = append(mErr.Errors, outer)
-		}
-	}
-
-	if t.Vault != nil {
-		if err := t.Vault.Validate(); err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
-		}
-	}
-
-	destinations := make(map[string]int, len(t.Templates))
-	for idx, tmpl := range t.Templates {
-		if err := tmpl.Validate(); err != nil {
-			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
-			mErr.Errors = append(mErr.Errors, outer)
-		}
-
-		if other, ok := destinations[tmpl.DestPath]; ok {
-			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
-			mErr.Errors = append(mErr.Errors, outer)
-		} else {
-			destinations[tmpl.DestPath] = idx + 1
-		}
-	}
-
-	// Validate the dispatch payload block if present
-	if t.DispatchPayload != nil {
-		if err := t.DispatchPayload.Validate(); err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
-		}
-	}
-
-	return mErr.ErrorOrNil()
-}
-
-// validateServices takes a task and validates the services within it are valid
-// and reference ports that exist.
-func validateServices(t *Task) error {
-	var mErr multierror.Error
-
-	// Ensure that services don't ask for non-existent ports and their names are
-	// unique.
-	servicePorts := make(map[string][]string)
-	knownServices := make(map[string]struct{})
-	for i, service := range t.Services {
-		if err := service.Validate(); err != nil {
-			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
-			mErr.Errors = append(mErr.Errors, outer)
-		}
-
-		// Ensure that services with the same name are not being registered for
-		// the same port
-		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
-		}
-		knownServices[service.Name+service.PortLabel] = struct{}{}
-
-		if service.PortLabel != "" {
-			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
-		}
-
-		// Ensure that check names are unique.
-		knownChecks := make(map[string]struct{})
-		for _, check := range service.Checks {
-			if _, ok := knownChecks[check.Name]; ok {
-				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
-			}
-			knownChecks[check.Name] = struct{}{}
-		}
-	}
-
-	// Get the set of port labels.
-	portLabels := make(map[string]struct{})
-	if t.Resources != nil {
-		for _, network := range t.Resources.Networks {
-			ports := network.PortLabels()
-			for portLabel := range ports {
-				portLabels[portLabel] = struct{}{}
-			}
-		}
-	}
-
-	// Ensure all ports referenced in services exist.
-	for servicePort, services := range servicePorts {
-		_, ok := portLabels[servicePort]
-		if !ok {
-			joined := strings.Join(services, ", ")
-			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
-			mErr.Errors = append(mErr.Errors, err)
-		}
-	}
-
-	// Ensure address mode is valid
-	return mErr.ErrorOrNil()
-}
-
-const (
-	// TemplateChangeModeNoop marks that no action should be taken if the
-	// template is re-rendered
-	TemplateChangeModeNoop = "noop"
-
-	// TemplateChangeModeSignal marks that the task should be signaled if the
-	// template is re-rendered
-	TemplateChangeModeSignal = "signal"
-
-	// TemplateChangeModeRestart marks that the task should be restarted if the
-	// template is re-rendered
-	TemplateChangeModeRestart = "restart"
-)
-
-var (
-	// TemplateChangeModeInvalidError is the error for when an invalid change
-	// mode is given
-	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
-)
-
-// Template represents a template configuration to be rendered for a given task
-type Template struct {
-	// SourcePath is the path to the template to be rendered
-	SourcePath string
-
-	// DestPath is the path to where the template should be rendered
-	DestPath string
-
-	// EmbeddedTmpl stores the raw template. This is useful for smaller templates
-	// where they are embedded in the job file rather than sent as an artifact
-	EmbeddedTmpl string
-
-	// ChangeMode indicates what should be done if the template is re-rendered
-	ChangeMode string
-
-	// ChangeSignal is the signal that should be sent if the change mode
-	// requires it.
-	ChangeSignal string
-
-	// Splay is used to avoid coordinated restarts of processes by applying a
-	// random wait between 0 and the given splay value before signalling the
-	// application of a change
-	Splay time.Duration
-
-	// Perms is the permission the file should be written out with.
-	Perms string
-
-	// LeftDelim and RightDelim are optional configurations to control what
-	// delimiter is utilized when parsing the template.
-	LeftDelim  string
-	RightDelim string
-
-	// Envvars enables exposing the template as environment variables
-	// instead of as a file. The template must be of the form:
-	//
-	//	VAR_NAME_1={{ key service/my-key }}
-	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
-	//
-	// Lines will be split on the initial "=" with the first part being the
-	// key name and the second part the value.
-	// Empty lines and lines starting with # will be ignored, but to avoid
-	// escaping issues #s within lines will not be treated as comments.
-	Envvars bool
-
-	// VaultGrace is the grace duration between lease renewal and reacquiring a
-	// secret. If the lease of a secret is less than the grace, a new secret is
-	// acquired.
-	VaultGrace time.Duration
-}
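-
-// Sketch of how these fields fit together (the values are hypothetical): an
-// embedded template that rewrites a local config file and restarts the task
-// whenever it is re-rendered could be expressed as
-//
-//	tmpl := &Template{
-//		EmbeddedTmpl: `bind_addr = "{{ env "NOMAD_IP_http" }}"`,
-//		DestPath:     "local/app.conf",
-//		ChangeMode:   TemplateChangeModeRestart,
-//	}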
-
-// DefaultTemplate returns a default template.
-func DefaultTemplate() *Template {
-	return &Template{
-		ChangeMode: TemplateChangeModeRestart,
-		Splay:      5 * time.Second,
-		Perms:      "0644",
-	}
-}
-
-func (t *Template) Copy() *Template {
-	if t == nil {
-		return nil
-	}
-	copy := new(Template)
-	*copy = *t
-	return copy
-}
-
-func (t *Template) Canonicalize() {
-	if t.ChangeSignal != "" {
-		t.ChangeSignal = strings.ToUpper(t.ChangeSignal)
-	}
-}
-
-func (t *Template) Validate() error {
-	var mErr multierror.Error
-
-	// Verify we have something to render
-	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
-		multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
-	}
-
-	// Verify we can render somewhere
-	if t.DestPath == "" {
-		multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
-	}
-
-	// Verify the destination doesn't escape
-	escaped, err := PathEscapesAllocDir("task", t.DestPath)
-	if err != nil {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
-	} else if escaped {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
-	}
-
-	// Verify a proper change mode
-	switch t.ChangeMode {
-	case TemplateChangeModeNoop, TemplateChangeModeRestart:
-	case TemplateChangeModeSignal:
-		if t.ChangeSignal == "" {
-			multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
-		}
-		if t.Envvars {
-			multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
-		}
-	default:
-		multierror.Append(&mErr, TemplateChangeModeInvalidError)
-	}
-
-	// Verify the splay is positive
-	if t.Splay < 0 {
-		multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
-	}
-
-	// Verify the permissions
-	if t.Perms != "" {
-		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
-			multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
-		}
-	}
-
-	if t.VaultGrace.Nanoseconds() < 0 {
-		multierror.Append(&mErr, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace))
-	}
-
-	return mErr.ErrorOrNil()
-}
-
-// Set of possible states for a task.
-const (
-	TaskStatePending = "pending" // The task is waiting to be run.
-	TaskStateRunning = "running" // The task is currently running.
-	TaskStateDead    = "dead"    // Terminal state of task.
-)
-
-// TaskState tracks the current state of a task and events that caused state
-// transitions.
-type TaskState struct {
-	// The current state of the task.
-	State string
-
-	// Failed marks a task as having failed
-	Failed bool
-
-	// Restarts is the number of times the task has restarted
-	Restarts uint64
-
-	// LastRestart is the time the task last restarted. It is updated each time the
-	// task restarts
-	LastRestart time.Time
-
-	// StartedAt is the time the task is started. It is updated each time the
-	// task starts
-	StartedAt time.Time
-
-	// FinishedAt is the time at which the task transitioned to dead and will
-	// not be started again.
-	FinishedAt time.Time
-
-	// Series of task events that transition the state of the task.
-	Events []*TaskEvent
-}
-
-func (ts *TaskState) Copy() *TaskState {
-	if ts == nil {
-		return nil
-	}
-	copy := new(TaskState)
-	*copy = *ts
-
-	if ts.Events != nil {
-		copy.Events = make([]*TaskEvent, len(ts.Events))
-		for i, e := range ts.Events {
-			copy.Events[i] = e.Copy()
-		}
-	}
-	return copy
-}
-
-// Successful returns whether a task finished successfully.
-func (ts *TaskState) Successful() bool {
-	l := len(ts.Events)
-	if ts.State != TaskStateDead || l == 0 {
-		return false
-	}
-
-	e := ts.Events[l-1]
-	if e.Type != TaskTerminated {
-		return false
-	}
-
-	return e.ExitCode == 0
-}
-
-const (
-	// TaskSetupFailure indicates that the task could not be started due to a
-	// setup failure.
-	TaskSetupFailure = "Setup Failure"
-
-	// TaskDriverFailure indicates that the task could not be started due to a
-	// failure in the driver.
-	TaskDriverFailure = "Driver Failure"
-
-	// TaskReceived signals that the task has been pulled by the client at the
-	// given timestamp.
-	TaskReceived = "Received"
-
-	// TaskFailedValidation indicates the task was invalid and as such was not
-	// run.
-	TaskFailedValidation = "Failed Validation"
-
-	// TaskStarted signals that the task was started and its timestamp can be
-	// used to determine the running length of the task.
-	TaskStarted = "Started"
-
-	// TaskTerminated indicates that the task was started and exited.
-	TaskTerminated = "Terminated"
-
-	// TaskKilling indicates a kill signal has been sent to the task.
-	TaskKilling = "Killing"
-
-	// TaskKilled indicates a user has killed the task.
-	TaskKilled = "Killed"
-
-	// TaskRestarting indicates that the task terminated and is being restarted.
-	TaskRestarting = "Restarting"
-
-	// TaskNotRestarting indicates that the task has failed and is not being
-	// restarted because it has exceeded its restart policy.
-	TaskNotRestarting = "Not Restarting"
-
-	// TaskRestartSignal indicates that the task has been signalled to be
-	// restarted
-	TaskRestartSignal = "Restart Signaled"
-
-	// TaskSignaling indicates that the task is being signalled.
-	TaskSignaling = "Signaling"
-
-	// TaskDownloadingArtifacts means the task is downloading the artifacts
-	// specified in the task.
-	TaskDownloadingArtifacts = "Downloading Artifacts"
-
-	// TaskArtifactDownloadFailed indicates that downloading the artifacts
-	// failed.
-	TaskArtifactDownloadFailed = "Failed Artifact Download"
-
-	// TaskBuildingTaskDir indicates that the task directory/chroot is being
-	// built.
-	TaskBuildingTaskDir = "Building Task Directory"
-
-	// TaskSetup indicates the task runner is setting up the task environment
-	TaskSetup = "Task Setup"
-
-	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
-	// exceeded the requested disk resources.
-	TaskDiskExceeded = "Disk Resources Exceeded"
-
-	// TaskSiblingFailed indicates that a sibling task in the task group has
-	// failed.
-	TaskSiblingFailed = "Sibling Task Failed"
-
-	// TaskDriverMessage is an informational event message emitted by
-	// drivers such as when they're performing a long running action like
-	// downloading an image.
-	TaskDriverMessage = "Driver"
-
-	// TaskLeaderDead indicates that the leader task within the task group has
-	// finished.
-	TaskLeaderDead = "Leader Task Dead"
-
-	// TaskGenericMessage is used by various subsystems to emit a message.
-	TaskGenericMessage = "Generic"
-)
-
-// TaskEvent is an event that affects the state of a task and contains meta-data
-// appropriate to the event's type.
-type TaskEvent struct {
-	Type string
-	Time int64 // Unix Nanosecond timestamp
-
-	// FailsTask marks whether this event fails the task
-	FailsTask bool
-
-	// Restart fields.
-	RestartReason string
-
-	// Setup Failure fields.
-	SetupError string
-
-	// Driver Failure fields.
-	DriverError string // A driver error occurred while starting the task.
-
-	// Task Terminated Fields.
-	ExitCode int // The exit code of the task.
-	Signal   int    // The signal that terminated the task.
-	Message  string // A possible message explaining the termination of the task.
-
-	// Killing fields
-	KillTimeout time.Duration
-
-	// Task Killed Fields.
-	KillError string // Error killing the task.
-
-	// KillReason is the reason the task was killed
-	KillReason string
-
-	// TaskRestarting fields.
-	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.
-
-	// Artifact Download fields
-	DownloadError string // Error downloading artifacts
-
-	// Validation fields
-	ValidationError string // Validation error
-
-	// The maximum allowed task disk size.
-	DiskLimit int64
-
-	// Name of the sibling task that caused termination of the task that
-	// the TaskEvent refers to.
-	FailedSibling string
-
-	// VaultError is the error from token renewal
-	VaultError string
-
-	// TaskSignalReason indicates the reason the task is being signalled.
-	TaskSignalReason string
-
-	// TaskSignal is the signal that was sent to the task
-	TaskSignal string
-
-	// DriverMessage indicates a driver action being taken.
-	DriverMessage string
-
-	// GenericSource is the source of a message.
-	GenericSource string
-}
-
-func (te *TaskEvent) GoString() string {
-	return fmt.Sprintf("%v - %v", te.Time, te.Type)
-}
-
-// SetMessage sets the message of TaskEvent
-func (te *TaskEvent) SetMessage(msg string) *TaskEvent {
-	te.Message = msg
-	return te
-}
-
-func (te *TaskEvent) Copy() *TaskEvent {
-	if te == nil {
-		return nil
-	}
-	copy := new(TaskEvent)
-	*copy = *te
-	return copy
-}
-
-func NewTaskEvent(event string) *TaskEvent {
-	return &TaskEvent{
-		Type: event,
-		Time: time.Now().UnixNano(),
-	}
-}
-
-// SetSetupError is used to store an error that occurred while setting up the
-// task
-func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
-	if err != nil {
-		e.SetupError = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetFailsTask() *TaskEvent {
-	e.FailsTask = true
-	return e
-}
-
-func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
-	if err != nil {
-		e.DriverError = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
-	e.ExitCode = c
-	return e
-}
-
-func (e *TaskEvent) SetSignal(s int) *TaskEvent {
-	e.Signal = s
-	return e
-}
-
-func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
-	if err != nil {
-		e.Message = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetKillError(err error) *TaskEvent {
-	if err != nil {
-		e.KillError = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
-	e.KillReason = r
-	return e
-}
-
-func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
-	e.StartDelay = int64(delay)
-	return e
-}
-
-func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
-	e.RestartReason = reason
-	return e
-}
-
-func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
-	e.TaskSignalReason = r
-	return e
-}
-
-func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
-	e.TaskSignal = s.String()
-	return e
-}
-
-func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
-	if err != nil {
-		e.DownloadError = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
-	if err != nil {
-		e.ValidationError = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
-	e.KillTimeout = timeout
-	return e
-}
-
-func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
-	e.DiskLimit = limit
-	return e
-}
-
-func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
-	e.FailedSibling = sibling
-	return e
-}
-
-func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
-	if err != nil {
-		e.VaultError = err.Error()
-	}
-	return e
-}
-
-func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
-	e.DriverMessage = m
-	return e
-}
-
-func (e *TaskEvent) SetGenericSource(s string) *TaskEvent {
-	e.GenericSource = s
-	return e
-}
-
-// TaskArtifact is an artifact to download before running the task.
-type TaskArtifact struct {
-	// GetterSource is the source to download an artifact using go-getter
-	GetterSource string
-
-	// GetterOptions are options to use when downloading the artifact using
-	// go-getter.
-	GetterOptions map[string]string
-
-	// GetterMode is the go-getter.ClientMode for fetching resources.
-	// Defaults to "any" but can be set to "file" or "dir".
-	GetterMode string
-
-	// RelativeDest is the download destination given relative to the task's
-	// directory.
-	RelativeDest string
-}
-
-func (ta *TaskArtifact) Copy() *TaskArtifact {
-	if ta == nil {
-		return nil
-	}
-	nta := new(TaskArtifact)
-	*nta = *ta
-	nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
-	return nta
-}
-
-func (ta *TaskArtifact) GoString() string {
-	return fmt.Sprintf("%+v", ta)
-}
-
-// PathEscapesAllocDir returns if the given path escapes the allocation
-// directory. The prefix allows adding a prefix if the path will be joined, for
-// example a "task/local" prefix may be provided if the path will be joined
-// against that prefix.
-func PathEscapesAllocDir(prefix, path string) (bool, error) {
-	// Verify the destination doesn't escape the task's directory
-	alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
-	if err != nil {
-		return false, err
-	}
-	abs, err := filepath.Abs(filepath.Join(alloc, prefix, path))
-	if err != nil {
-		return false, err
-	}
-	rel, err := filepath.Rel(alloc, abs)
-	if err != nil {
-		return false, err
-	}
-
-	return strings.HasPrefix(rel, ".."), nil
-}
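-
-// To illustrate the check above (the paths are hypothetical):
-//
-//	escapes, _ := PathEscapesAllocDir("task", "local/config.hcl") // false
-//	escapes, _ = PathEscapesAllocDir("task", "../../etc/passwd")  // true
-//
-// The second path resolves outside the faked allocation directory, so the
-// relative path computed against it starts with ".." and is rejected.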
-
-func (ta *TaskArtifact) Validate() error {
-	// Verify the source
-	var mErr multierror.Error
-	if ta.GetterSource == "" {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
-	}
-
-	switch ta.GetterMode {
-	case "":
-		// Default to any
-		ta.GetterMode = GetterModeAny
-	case GetterModeAny, GetterModeFile, GetterModeDir:
-		// Ok
-	default:
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
-			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
-	}
-
-	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
-	if err != nil {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
-	} else if escaped {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
-	}
-
-	// Verify the checksum
-	if check, ok := ta.GetterOptions["checksum"]; ok {
-		check = strings.TrimSpace(check)
-		if check == "" {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty"))
-			return mErr.ErrorOrNil()
-		}
-
-		parts := strings.Split(check, ":")
-		if l := len(parts); l != 2 {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check))
-			return mErr.ErrorOrNil()
-		}
-
-		checksumVal := parts[1]
-		checksumBytes, err := hex.DecodeString(checksumVal)
-		if err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err))
-			return mErr.ErrorOrNil()
-		}
-
-		checksumType := parts[0]
-		expectedLength := 0
-		switch checksumType {
-		case "md5":
-			expectedLength = md5.Size
-		case "sha1":
-			expectedLength = sha1.Size
-		case "sha256":
-			expectedLength = sha256.Size
-		case "sha512":
-			expectedLength = sha512.Size
-		default:
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType))
-			return mErr.ErrorOrNil()
-		}
-
-		if len(checksumBytes) != expectedLength {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal))
-			return mErr.ErrorOrNil()
-		}
-	}
-
-	return mErr.ErrorOrNil()
-}
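-
-// For example (hypothetical artifact), a checksum option passing the checks
-// above pairs a supported type with a hex digest of the matching length:
-//
-//	ta := &TaskArtifact{
-//		GetterSource:  "https://example.com/file.tar.gz",
-//		GetterOptions: map[string]string{"checksum": "md5:d41d8cd98f00b204e9800998ecf8427e"},
-//	}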
-
-const (
-	ConstraintDistinctProperty = "distinct_property"
-	ConstraintDistinctHosts    = "distinct_hosts"
-	ConstraintRegex            = "regexp"
-	ConstraintVersion          = "version"
-	ConstraintSetContains      = "set_contains"
-)
-
-// Constraints are used to restrict placement options.
-type Constraint struct {
-	LTarget string // Left-hand target
-	RTarget string // Right-hand target
-	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
-	str     string // Memoized string
-}
-
-// Equal checks if two constraints are equal
-func (c *Constraint) Equal(o *Constraint) bool {
-	return c.LTarget == o.LTarget &&
-		c.RTarget == o.RTarget &&
-		c.Operand == o.Operand
-}
-
-func (c *Constraint) Copy() *Constraint {
-	if c == nil {
-		return nil
-	}
-	nc := new(Constraint)
-	*nc = *c
-	return nc
-}
-
-func (c *Constraint) String() string {
-	if c.str != "" {
-		return c.str
-	}
-	c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
-	return c.str
-}
-
-func (c *Constraint) Validate() error {
-	var mErr multierror.Error
-	if c.Operand == "" {
-		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
-	}
-
-	// requireLtarget specifies whether the constraint requires an LTarget to be
-	// provided.
-	requireLtarget := true
-
-	// Perform additional validation based on operand
-	switch c.Operand {
-	case ConstraintDistinctHosts:
-		requireLtarget = false
-	case ConstraintSetContains:
-		if c.RTarget == "" {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
-		}
-	case ConstraintRegex:
-		if _, err := regexp.Compile(c.RTarget); err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
-		}
-	case ConstraintVersion:
-		if _, err := version.NewConstraint(c.RTarget); err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
-		}
-	case ConstraintDistinctProperty:
-		// If a count is set, make sure it is convertible to a uint64
-		if c.RTarget != "" {
-			count, err := strconv.ParseUint(c.RTarget, 10, 64)
-			if err != nil {
-				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
-			} else if count < 1 {
-				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
-			}
-		}
-	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
-		if c.RTarget == "" {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
-		}
-	default:
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
-	}
-
-	// Ensure we have an LTarget for the constraints that need one
-	if requireLtarget && c.LTarget == "" {
-		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
-	}
-
-	return mErr.ErrorOrNil()
-}
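-
-// Two constraints that pass Validate above (the attribute values are only
-// illustrative; "${attr.kernel.name}" and "${node.datacenter}" follow Nomad's
-// interpolation syntax):
-//
-//	c1 := &Constraint{LTarget: "${attr.kernel.name}", Operand: "=", RTarget: "linux"}
-//	c2 := &Constraint{LTarget: "${node.datacenter}", Operand: ConstraintRegex, RTarget: "^dc[12]$"}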
-
-// EphemeralDisk is an ephemeral disk object
-type EphemeralDisk struct {
-	// Sticky indicates whether the allocation is sticky to a node
-	Sticky bool
-
-	// SizeMB is the size of the local disk
-	SizeMB int
-
-	// Migrate determines if Nomad client should migrate the allocation dir for
-	// sticky allocations
-	Migrate bool
-}
-
-// DefaultEphemeralDisk returns an EphemeralDisk with default configurations
-func DefaultEphemeralDisk() *EphemeralDisk {
-	return &EphemeralDisk{
-		SizeMB: 300,
-	}
-}
-
-// Validate validates EphemeralDisk
-func (d *EphemeralDisk) Validate() error {
-	if d.SizeMB < 10 {
-		return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
-	}
-	return nil
-}
-
-// Copy copies the EphemeralDisk struct and returns a new one
-func (d *EphemeralDisk) Copy() *EphemeralDisk {
-	ld := new(EphemeralDisk)
-	*ld = *d
-	return ld
-}
-
-const (
-	// VaultChangeModeNoop takes no action when a new token is retrieved.
-	VaultChangeModeNoop = "noop"
-
-	// VaultChangeModeSignal signals the task when a new token is retrieved.
-	VaultChangeModeSignal = "signal"
-
-	// VaultChangeModeRestart restarts the task when a new token is retrieved.
-	VaultChangeModeRestart = "restart"
-)
-
-// Vault stores the set of permissions a task needs access to from Vault.
-type Vault struct {
-	// Policies is the set of policies that the task needs access to
-	Policies []string
-
-	// Env marks whether the Vault Token should be exposed as an environment
-	// variable
-	Env bool
-
-	// ChangeMode is used to configure the task's behavior when the Vault
-	// token changes because the original token could not be renewed in time.
-	ChangeMode string
-
-	// ChangeSignal is the signal sent to the task when a new token is
-	// retrieved. This is only valid when using the signal change mode.
-	ChangeSignal string
-}
-
-func DefaultVaultBlock() *Vault {
-	return &Vault{
-		Env:        true,
-		ChangeMode: VaultChangeModeRestart,
-	}
-}
-
-// Copy returns a copy of this Vault block.
-func (v *Vault) Copy() *Vault {
-	if v == nil {
-		return nil
-	}
-
-	nv := new(Vault)
-	*nv = *v
-	return nv
-}
-
-func (v *Vault) Canonicalize() {
-	if v.ChangeSignal != "" {
-		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
-	}
-}
-
-// Validate returns if the Vault block is valid.
-func (v *Vault) Validate() error {
-	if v == nil {
-		return nil
-	}
-
-	var mErr multierror.Error
-	if len(v.Policies) == 0 {
-		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
-	}
-
-	for _, p := range v.Policies {
-		if p == "root" {
-			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
-		}
-	}
-
-	switch v.ChangeMode {
-	case VaultChangeModeSignal:
-		if v.ChangeSignal == "" {
-			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
-		}
-	case VaultChangeModeNoop, VaultChangeModeRestart:
-	default:
-		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
-	}
-
-	return mErr.ErrorOrNil()
-}
-
-const (
-	// DeploymentStatuses are the various states a deployment can be in
-	DeploymentStatusRunning    = "running"
-	DeploymentStatusPaused     = "paused"
-	DeploymentStatusFailed     = "failed"
-	DeploymentStatusSuccessful = "successful"
-	DeploymentStatusCancelled  = "cancelled"
-
-	// DeploymentStatusDescriptions are the various descriptions of the states a
-	// deployment can be in.
-	DeploymentStatusDescriptionRunning               = "Deployment is running"
-	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
-	DeploymentStatusDescriptionPaused                = "Deployment is paused"
-	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
-	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
-	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
-	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
-	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
-)
-
-// DeploymentStatusDescriptionRollback is used to get the status description of
-// a deployment when rolling back to an older job.
-func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
-	return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion)
-}
-
-// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
-// a deployment when there is no target to rollback to but autorevert is desired.
-func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
-	return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription)
-}
-
-// Deployment is the object that represents a job deployment which is used to
-// transition a job between versions.
-type Deployment struct {
-	// ID is a generated UUID for the deployment
-	ID string
-
-	// Namespace is the namespace the deployment is created in
-	Namespace string
-
-	// JobID is the job the deployment is created for
-	JobID string
-
-	// JobVersion is the version of the job the deployment is tracking
-	JobVersion uint64
-
-	// JobModifyIndex is the modify index of the job the deployment is tracking
-	JobModifyIndex uint64
-
-	// JobCreateIndex is the create index of the job which the deployment is
-	// tracking. It is needed so that if the job gets stopped and rerun we can
-	// present the correct list of deployments for the job and not old ones.
-	JobCreateIndex uint64
-
-	// TaskGroups is the set of task groups affected by the deployment and their
-	// current deployment status.
-	TaskGroups map[string]*DeploymentState
-
-	// The status of the deployment
-	Status string
-
-	// StatusDescription allows a human readable description of the deployment
-	// status.
-	StatusDescription string
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// NewDeployment creates a new deployment given the job.
-func NewDeployment(job *Job) *Deployment {
-	return &Deployment{
-		ID:                GenerateUUID(),
-		Namespace:         job.Namespace,
-		JobID:             job.ID,
-		JobVersion:        job.Version,
-		JobModifyIndex:    job.ModifyIndex,
-		JobCreateIndex:    job.CreateIndex,
-		Status:            DeploymentStatusRunning,
-		StatusDescription: DeploymentStatusDescriptionRunning,
-		TaskGroups:        make(map[string]*DeploymentState, len(job.TaskGroups)),
-	}
-}
-
-func (d *Deployment) Copy() *Deployment {
-	if d == nil {
-		return nil
-	}
-
-	c := &Deployment{}
-	*c = *d
-
-	c.TaskGroups = nil
-	if l := len(d.TaskGroups); d.TaskGroups != nil {
-		c.TaskGroups = make(map[string]*DeploymentState, l)
-		for tg, s := range d.TaskGroups {
-			c.TaskGroups[tg] = s.Copy()
-		}
-	}
-
-	return c
-}
-
-// Active returns whether the deployment is still active (as opposed to terminal).
-func (d *Deployment) Active() bool {
-	switch d.Status {
-	case DeploymentStatusRunning, DeploymentStatusPaused:
-		return true
-	default:
-		return false
-	}
-}
-
-// GetID is a helper for getting the ID when the object may be nil
-func (d *Deployment) GetID() string {
-	if d == nil {
-		return ""
-	}
-	return d.ID
-}
-
-// HasPlacedCanaries returns whether the deployment has placed canaries
-func (d *Deployment) HasPlacedCanaries() bool {
-	if d == nil || len(d.TaskGroups) == 0 {
-		return false
-	}
-	for _, group := range d.TaskGroups {
-		if len(group.PlacedCanaries) != 0 {
-			return true
-		}
-	}
-	return false
-}
-
-// RequiresPromotion returns whether the deployment requires promotion to
-// continue
-func (d *Deployment) RequiresPromotion() bool {
-	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
-		return false
-	}
-	for _, group := range d.TaskGroups {
-		if group.DesiredCanaries > 0 && !group.Promoted {
-			return true
-		}
-	}
-	return false
-}
-
-func (d *Deployment) GoString() string {
-	base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
-	for group, state := range d.TaskGroups {
-		base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state)
-	}
-	return base
-}
-
-// DeploymentState tracks the state of a deployment for a given task group.
-type DeploymentState struct {
-	// AutoRevert marks whether the task group has indicated the job should be
-	// reverted on failure
-	AutoRevert bool
-
-	// Promoted marks whether the canaries have been promoted
-	Promoted bool
-
-	// PlacedCanaries is the set of placed canary allocations
-	PlacedCanaries []string
-
-	// DesiredCanaries is the number of canaries that should be created.
-	DesiredCanaries int
-
-	// DesiredTotal is the total number of allocations that should be created as
-	// part of the deployment.
-	DesiredTotal int
-
-	// PlacedAllocs is the number of allocations that have been placed
-	PlacedAllocs int
-
-	// HealthyAllocs is the number of allocations that have been marked healthy.
-	HealthyAllocs int
-
-	// UnhealthyAllocs are allocations that have been marked as unhealthy.
-	UnhealthyAllocs int
-}
-
-func (d *DeploymentState) GoString() string {
-	base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal)
-	base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries)
-	base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries)
-	base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted)
-	base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs)
-	base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs)
-	base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs)
-	base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert)
-	return base
-}
-
-func (d *DeploymentState) Copy() *DeploymentState {
-	c := &DeploymentState{}
-	*c = *d
-	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
-	return c
-}
-
-// DeploymentStatusUpdate is used to update the status of a given deployment
-type DeploymentStatusUpdate struct {
-	// DeploymentID is the ID of the deployment to update
-	DeploymentID string
-
-	// Status is the new status of the deployment.
-	Status string
-
-	// StatusDescription is the new status description of the deployment.
-	StatusDescription string
-}
-
-const (
-	AllocDesiredStatusRun   = "run"   // Allocation should run
-	AllocDesiredStatusStop  = "stop"  // Allocation should stop
-	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
-)
-
-const (
-	AllocClientStatusPending  = "pending"
-	AllocClientStatusRunning  = "running"
-	AllocClientStatusComplete = "complete"
-	AllocClientStatusFailed   = "failed"
-	AllocClientStatusLost     = "lost"
-)
-
-// Allocation is used to allocate the placement of a task group to a node.
-type Allocation struct {
-	// ID of the allocation (UUID)
-	ID string
-
-	// Namespace is the namespace the allocation is created in
-	Namespace string
-
-	// ID of the evaluation that generated this allocation
-	EvalID string
-
-	// Name is a logical name of the allocation.
-	Name string
-
-	// NodeID is the node this is being placed on
-	NodeID string
-
-	// Job is the parent job of the task group being allocated.
-	// This is copied at allocation time to avoid issues if the job
-	// definition is updated.
-	JobID string
-	Job   *Job
-
-	// TaskGroup is the name of the task group that should be run
-	TaskGroup string
-
-	// Resources is the total set of resources allocated as part
-	// of this allocation of the task group.
-	Resources *Resources
-
-	// SharedResources are the resources that are shared by all the tasks in an
-	// allocation
-	SharedResources *Resources
-
-	// TaskResources is the set of resources allocated to each
-	// task. These should sum to the total Resources.
-	TaskResources map[string]*Resources
-
-	// Metrics associated with this allocation
-	Metrics *AllocMetric
-
-	// Desired Status of the allocation on the client
-	DesiredStatus string
-
-	// DesiredDescription is meant to provide additional human-readable information
-	DesiredDescription string
-
-	// Status of the allocation on the client
-	ClientStatus string
-
-	// ClientDescription is meant to provide additional human-readable information
-	ClientDescription string
-
-	// TaskStates stores the state of each task.
-	TaskStates map[string]*TaskState
-
-	// PreviousAllocation is the allocation that this allocation is replacing
-	PreviousAllocation string
-
-	// DeploymentID identifies an allocation as being created from a
-	// particular deployment
-	DeploymentID string
-
-	// DeploymentStatus captures the status of the allocation as part of the
-	// given deployment
-	DeploymentStatus *AllocDeploymentStatus
-
-	// Raft Indexes
-	CreateIndex uint64
-	ModifyIndex uint64
-
-	// AllocModifyIndex is not updated when the client updates allocations. This
-	// lets the client pull only the allocs updated by the server.
-	AllocModifyIndex uint64
-
-	// CreateTime is the time the allocation has finished scheduling and been
-	// verified by the plan applier.
-	CreateTime int64
-}
-
-// Index returns the index of the allocation. If the allocation is from a task
-// group with count greater than 1, there will be multiple allocations for it.
-func (a *Allocation) Index() uint {
-	l := len(a.Name)
-	prefix := len(a.JobID) + len(a.TaskGroup) + 2
-	if l <= 3 || l <= prefix {
-		return uint(0)
-	}
-
-	strNum := a.Name[prefix : len(a.Name)-1]
-	num, _ := strconv.Atoi(strNum)
-	return uint(num)
-}
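-
-// For instance (hypothetical allocation): with JobID "example" and TaskGroup
-// "cache", the allocation named "example.cache[3]" has prefix
-// len("example")+len("cache")+2 = 14, so the slice between the brackets is
-// "3" and Index returns 3.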
-
-func (a *Allocation) Copy() *Allocation {
-	return a.copyImpl(true)
-}
-
-// CopySkipJob provides a copy of the allocation but doesn't deep copy the job
-func (a *Allocation) CopySkipJob() *Allocation {
-	return a.copyImpl(false)
-}
-
-func (a *Allocation) copyImpl(job bool) *Allocation {
-	if a == nil {
-		return nil
-	}
-	na := new(Allocation)
-	*na = *a
-
-	if job {
-		na.Job = na.Job.Copy()
-	}
-
-	na.Resources = na.Resources.Copy()
-	na.SharedResources = na.SharedResources.Copy()
-
-	if a.TaskResources != nil {
-		tr := make(map[string]*Resources, len(na.TaskResources))
-		for task, resource := range na.TaskResources {
-			tr[task] = resource.Copy()
-		}
-		na.TaskResources = tr
-	}
-
-	na.Metrics = na.Metrics.Copy()
-	na.DeploymentStatus = na.DeploymentStatus.Copy()
-
-	if a.TaskStates != nil {
-		ts := make(map[string]*TaskState, len(na.TaskStates))
-		for task, state := range na.TaskStates {
-			ts[task] = state.Copy()
-		}
-		na.TaskStates = ts
-	}
-	return na
-}
-
-// TerminalStatus returns if the desired or actual status is terminal and
-// will no longer transition.
-func (a *Allocation) TerminalStatus() bool {
-	// First check the desired state and if that isn't terminal, check client
-	// state.
-	switch a.DesiredStatus {
-	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
-		return true
-	default:
-	}
-
-	switch a.ClientStatus {
-	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
-		return true
-	default:
-		return false
-	}
-}
-
-// Terminated returns if the allocation is in a terminal state on a client.
-func (a *Allocation) Terminated() bool {
-	if a.ClientStatus == AllocClientStatusFailed ||
-		a.ClientStatus == AllocClientStatusComplete ||
-		a.ClientStatus == AllocClientStatusLost {
-		return true
-	}
-	return false
-}
-
-// RanSuccessfully returns whether the client has run the allocation and all
-// tasks finished successfully
-func (a *Allocation) RanSuccessfully() bool {
-	return a.ClientStatus == AllocClientStatusComplete
-}
-
-// ShouldMigrate returns if the allocation needs data migration
-func (a *Allocation) ShouldMigrate() bool {
-	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
-		return false
-	}
-
-	tg := a.Job.LookupTaskGroup(a.TaskGroup)
-
-	// if the task group is nil or the ephemeral disk block isn't present then
-	// we won't migrate
-	if tg == nil || tg.EphemeralDisk == nil {
-		return false
-	}
-
-	// We won't migrate any data if the user hasn't enabled migration or the
-	// disk is not marked as sticky
-	if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky {
-		return false
-	}
-
-	return true
-}
-
-// Stub returns a list stub for the allocation
-func (a *Allocation) Stub() *AllocListStub {
-	return &AllocListStub{
-		ID:                 a.ID,
-		EvalID:             a.EvalID,
-		Name:               a.Name,
-		NodeID:             a.NodeID,
-		JobID:              a.JobID,
-		JobVersion:         a.Job.Version,
-		TaskGroup:          a.TaskGroup,
-		DesiredStatus:      a.DesiredStatus,
-		DesiredDescription: a.DesiredDescription,
-		ClientStatus:       a.ClientStatus,
-		ClientDescription:  a.ClientDescription,
-		TaskStates:         a.TaskStates,
-		DeploymentStatus:   a.DeploymentStatus,
-		CreateIndex:        a.CreateIndex,
-		ModifyIndex:        a.ModifyIndex,
-		CreateTime:         a.CreateTime,
-	}
-}
-
-// AllocListStub is used to return a subset of alloc information
-type AllocListStub struct {
-	ID                 string
-	EvalID             string
-	Name               string
-	NodeID             string
-	JobID              string
-	JobVersion         uint64
-	TaskGroup          string
-	DesiredStatus      string
-	DesiredDescription string
-	ClientStatus       string
-	ClientDescription  string
-	TaskStates         map[string]*TaskState
-	DeploymentStatus   *AllocDeploymentStatus
-	CreateIndex        uint64
-	ModifyIndex        uint64
-	CreateTime         int64
-}
-
-// AllocMetric is used to track various metrics while attempting
-// to make an allocation. These are used to debug a job, or to better
-// understand the pressure within the system.
-type AllocMetric struct {
-	// NodesEvaluated is the number of nodes that were evaluated
-	NodesEvaluated int
-
-	// NodesFiltered is the number of nodes filtered due to a constraint
-	NodesFiltered int
-
-	// NodesAvailable is the number of nodes available for evaluation per DC.
-	NodesAvailable map[string]int
-
-	// ClassFiltered is the number of nodes filtered by class
-	ClassFiltered map[string]int
-
-	// ConstraintFiltered is the number of failures caused by constraint
-	ConstraintFiltered map[string]int
-
-	// NodesExhausted is the number of nodes skipped due to being
-	// exhausted of at least one resource
-	NodesExhausted int
-
-	// ClassExhausted is the number of nodes exhausted by class
-	ClassExhausted map[string]int
-
-	// DimensionExhausted provides the count by dimension or reason
-	DimensionExhausted map[string]int
-
-	// Scores is the scores of the final few nodes remaining
-	// for placement. The top score is typically selected.
-	Scores map[string]float64
-
-	// AllocationTime is a measure of how long the allocation
-	// attempt took. This can affect performance and SLAs.
-	AllocationTime time.Duration
-
-	// CoalescedFailures indicates the number of other
-	// allocations that were coalesced into this failed allocation.
- // This is to prevent creating many failed allocations for a - // single task group. - CoalescedFailures int -} - -func (a *AllocMetric) Copy() *AllocMetric { - if a == nil { - return nil - } - na := new(AllocMetric) - *na = *a - na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable) - na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered) - na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered) - na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted) - na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted) - na.Scores = helper.CopyMapStringFloat64(na.Scores) - return na -} - -func (a *AllocMetric) EvaluateNode() { - a.NodesEvaluated += 1 -} - -func (a *AllocMetric) FilterNode(node *Node, constraint string) { - a.NodesFiltered += 1 - if node != nil && node.NodeClass != "" { - if a.ClassFiltered == nil { - a.ClassFiltered = make(map[string]int) - } - a.ClassFiltered[node.NodeClass] += 1 - } - if constraint != "" { - if a.ConstraintFiltered == nil { - a.ConstraintFiltered = make(map[string]int) - } - a.ConstraintFiltered[constraint] += 1 - } -} - -func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) { - a.NodesExhausted += 1 - if node != nil && node.NodeClass != "" { - if a.ClassExhausted == nil { - a.ClassExhausted = make(map[string]int) - } - a.ClassExhausted[node.NodeClass] += 1 - } - if dimension != "" { - if a.DimensionExhausted == nil { - a.DimensionExhausted = make(map[string]int) - } - a.DimensionExhausted[dimension] += 1 - } -} - -func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) { - if a.Scores == nil { - a.Scores = make(map[string]float64) - } - key := fmt.Sprintf("%s.%s", node.ID, name) - a.Scores[key] = score -} - -// AllocDeploymentStatus captures the status of the allocation as part of the -// deployment. This can include things like if the allocation has been marked as -// heatlhy. -type AllocDeploymentStatus struct { - // Healthy marks whether the allocation has been marked healthy or unhealthy - // as part of a deployment. It can be unset if it has neither been marked - // healthy or unhealthy. - Healthy *bool - - // ModifyIndex is the raft index in which the deployment status was last - // changed. 
- ModifyIndex uint64 -} - -// IsHealthy returns if the allocation is marked as healthy as part of a -// deployment -func (a *AllocDeploymentStatus) IsHealthy() bool { - if a == nil { - return false - } - - return a.Healthy != nil && *a.Healthy -} - -// IsUnhealthy returns if the allocation is marked as unhealthy as part of a -// deployment -func (a *AllocDeploymentStatus) IsUnhealthy() bool { - if a == nil { - return false - } - - return a.Healthy != nil && !*a.Healthy -} - -func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus { - if a == nil { - return nil - } - - c := new(AllocDeploymentStatus) - *c = *a - - if a.Healthy != nil { - c.Healthy = helper.BoolToPtr(*a.Healthy) - } - - return c -} - -const ( - EvalStatusBlocked = "blocked" - EvalStatusPending = "pending" - EvalStatusComplete = "complete" - EvalStatusFailed = "failed" - EvalStatusCancelled = "canceled" -) - -const ( - EvalTriggerJobRegister = "job-register" - EvalTriggerJobDeregister = "job-deregister" - EvalTriggerPeriodicJob = "periodic-job" - EvalTriggerNodeUpdate = "node-update" - EvalTriggerScheduled = "scheduled" - EvalTriggerRollingUpdate = "rolling-update" - EvalTriggerDeploymentWatcher = "deployment-watcher" - EvalTriggerFailedFollowUp = "failed-follow-up" - EvalTriggerMaxPlans = "max-plan-attempts" -) - -const ( - // CoreJobEvalGC is used for the garbage collection of evaluations - // and allocations. We periodically scan evaluations in a terminal state, - // in which all the corresponding allocations are also terminal. We - // delete these out of the system to bound the state. - CoreJobEvalGC = "eval-gc" - - // CoreJobNodeGC is used for the garbage collection of failed nodes. - // We periodically scan nodes in a terminal state, and if they have no - // corresponding allocations we delete these out of the system. - CoreJobNodeGC = "node-gc" - - // CoreJobJobGC is used for the garbage collection of eligible jobs. We - // periodically scan garbage collectible jobs and check if both their - // evaluations and allocations are terminal. If so, we delete these out of - // the system. - CoreJobJobGC = "job-gc" - - // CoreJobDeploymentGC is used for the garbage collection of eligible - // deployments. We periodically scan garbage collectible deployments and - // check if they are terminal. If so, we delete these out of the system. - CoreJobDeploymentGC = "deployment-gc" - - // CoreJobForceGC is used to force garbage collection of all GCable objects. - CoreJobForceGC = "force-gc" -) - -// Evaluation is used anytime we need to apply business logic as a result -// of a change to our desired state (job specification) or the emergent state -// (registered nodes). When the inputs change, we need to "evaluate" them, -// potentially taking action (allocation of work) or doing nothing if the state -// of the world does not require it. -type Evaluation struct { - // ID is a randonly generated UUID used for this evaluation. This - // is assigned upon the creation of the evaluation. - ID string - - // Namespace is the namespace the evaluation is created in - Namespace string - - // Priority is used to control scheduling importance and if this job - // can preempt other jobs. - Priority int - - // Type is used to control which schedulers are available to handle - // this evaluation. - Type string - - // TriggeredBy is used to give some insight into why this Eval - // was created. (Job change, node failure, alloc failure, etc). - TriggeredBy string - - // JobID is the job this evaluation is scoped to. 
Evaluations cannot - // be run in parallel for a given JobID, so we serialize on this. - JobID string - - // JobModifyIndex is the modify index of the job at the time - // the evaluation was created - JobModifyIndex uint64 - - // NodeID is the node that was affected triggering the evaluation. - NodeID string - - // NodeModifyIndex is the modify index of the node at the time - // the evaluation was created - NodeModifyIndex uint64 - - // DeploymentID is the ID of the deployment that triggered the evaluation. - DeploymentID string - - // Status of the evaluation - Status string - - // StatusDescription is meant to provide more human useful information - StatusDescription string - - // Wait is a minimum wait time for running the eval. This is used to - // support a rolling upgrade. - Wait time.Duration - - // NextEval is the evaluation ID for the eval created to do a followup. - // This is used to support rolling upgrades, where we need a chain of evaluations. - NextEval string - - // PreviousEval is the evaluation ID for the eval creating this one to do a followup. - // This is used to support rolling upgrades, where we need a chain of evaluations. - PreviousEval string - - // BlockedEval is the evaluation ID for a created blocked eval. A - // blocked eval will be created if all allocations could not be placed due - // to constraints or lacking resources. - BlockedEval string - - // FailedTGAllocs are task groups which have allocations that could not be - // made, but the metrics are persisted so that the user can use the feedback - // to determine the cause. - FailedTGAllocs map[string]*AllocMetric - - // ClassEligibility tracks computed node classes that have been explicitly - // marked as eligible or ineligible. - ClassEligibility map[string]bool - - // EscapedComputedClass marks whether the job has constraints that are not - // captured by computed node classes. - EscapedComputedClass bool - - // AnnotatePlan triggers the scheduler to provide additional annotations - // during the evaluation. This should not be set during normal operations. - AnnotatePlan bool - - // QueuedAllocations is the number of unplaced allocations at the time the - // evaluation was processed. The map is keyed by Task Group names. - QueuedAllocations map[string]int - - // SnapshotIndex is the Raft index of the snapshot used to process the - // evaluation. As such it will only be set once it has gone through the - // scheduler. - SnapshotIndex uint64 - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -// TerminalStatus returns if the current status is terminal and -// will no longer transition. 
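For context on how the status helpers defined just below are typically combined, here is a rough sketch of routing an evaluation by status (illustrative only: the `route` helper and `main` wrapper are not part of this patch, and the upstream `structs` import path is assumed):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

// route mirrors how the eval broker treats an evaluation: pending evals
// are enqueued, blocked evals wait for capacity, and terminal evals
// (complete, failed, cancelled) are never transitioned again. It assumes
// e.Status is one of the EvalStatus* constants; ShouldEnqueue and
// ShouldBlock panic on anything else.
func route(e *structs.Evaluation) string {
	switch {
	case e.ShouldEnqueue():
		return "enqueue"
	case e.ShouldBlock():
		return "block"
	case e.TerminalStatus():
		return "ignore"
	default:
		return "unknown"
	}
}

func main() {
	e := &structs.Evaluation{
		ID:     "00000000-0000-0000-0000-000000000000",
		Status: structs.EvalStatusPending,
	}
	fmt.Println(route(e)) // enqueue
}
```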
-func (e *Evaluation) TerminalStatus() bool {
-	switch e.Status {
-	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
-		return true
-	default:
-		return false
-	}
-}
-
-func (e *Evaluation) GoString() string {
-	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
-}
-
-func (e *Evaluation) Copy() *Evaluation {
-	if e == nil {
-		return nil
-	}
-	ne := new(Evaluation)
-	*ne = *e
-
-	// Copy ClassEligibility
-	if e.ClassEligibility != nil {
-		classes := make(map[string]bool, len(e.ClassEligibility))
-		for class, elig := range e.ClassEligibility {
-			classes[class] = elig
-		}
-		ne.ClassEligibility = classes
-	}
-
-	// Copy FailedTGAllocs
-	if e.FailedTGAllocs != nil {
-		failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
-		for tg, metric := range e.FailedTGAllocs {
-			failedTGs[tg] = metric.Copy()
-		}
-		ne.FailedTGAllocs = failedTGs
-	}
-
-	// Copy queued allocations
-	if e.QueuedAllocations != nil {
-		queuedAllocations := make(map[string]int, len(e.QueuedAllocations))
-		for tg, num := range e.QueuedAllocations {
-			queuedAllocations[tg] = num
-		}
-		ne.QueuedAllocations = queuedAllocations
-	}
-
-	return ne
-}
-
-// ShouldEnqueue checks if a given evaluation should be enqueued into the
-// eval_broker
-func (e *Evaluation) ShouldEnqueue() bool {
-	switch e.Status {
-	case EvalStatusPending:
-		return true
-	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
-		return false
-	default:
-		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
-	}
-}
-
-// ShouldBlock checks if a given evaluation should be entered into the blocked
-// eval tracker.
-func (e *Evaluation) ShouldBlock() bool {
-	switch e.Status {
-	case EvalStatusBlocked:
-		return true
-	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
-		return false
-	default:
-		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
-	}
-}
-
-// MakePlan is used to make a plan from the given evaluation
-// for a given Job
-func (e *Evaluation) MakePlan(j *Job) *Plan {
-	p := &Plan{
-		EvalID:         e.ID,
-		Priority:       e.Priority,
-		Job:            j,
-		NodeUpdate:     make(map[string][]*Allocation),
-		NodeAllocation: make(map[string][]*Allocation),
-	}
-	if j != nil {
-		p.AllAtOnce = j.AllAtOnce
-	}
-	return p
-}
-
-// NextRollingEval creates an evaluation to followup this eval for rolling updates
-func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
-	return &Evaluation{
-		ID:             GenerateUUID(),
-		Namespace:      e.Namespace,
-		Priority:       e.Priority,
-		Type:           e.Type,
-		TriggeredBy:    EvalTriggerRollingUpdate,
-		JobID:          e.JobID,
-		JobModifyIndex: e.JobModifyIndex,
-		Status:         EvalStatusPending,
-		Wait:           wait,
-		PreviousEval:   e.ID,
-	}
-}
-
-// CreateBlockedEval creates a blocked evaluation to followup this eval to place any
-// failed allocations. It takes the classes marked explicitly eligible or
-// ineligible and whether the job has escaped computed node classes.
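A short sketch of how these follow-up constructors chain evaluations for a rolling update (illustrative only: `rollingChain` is not part of this patch, and the upstream `structs` import path is assumed):

```go
package example

import (
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

// rollingChain derives n staggered follow-up evaluations from a root
// evaluation. Each follow-up starts out pending, carries the supplied
// wait, and points back at its parent through PreviousEval.
func rollingChain(root *structs.Evaluation, stagger time.Duration, n int) []*structs.Evaluation {
	evals := []*structs.Evaluation{root}
	prev := root
	for i := 0; i < n; i++ {
		next := prev.NextRollingEval(stagger)
		evals = append(evals, next)
		prev = next
	}
	return evals
}
```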
-func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation { - return &Evaluation{ - ID: GenerateUUID(), - Namespace: e.Namespace, - Priority: e.Priority, - Type: e.Type, - TriggeredBy: e.TriggeredBy, - JobID: e.JobID, - JobModifyIndex: e.JobModifyIndex, - Status: EvalStatusBlocked, - PreviousEval: e.ID, - ClassEligibility: classEligibility, - EscapedComputedClass: escaped, - } -} - -// CreateFailedFollowUpEval creates a follow up evaluation when the current one -// has been marked as failed because it has hit the delivery limit and will not -// be retried by the eval_broker. -func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation { - return &Evaluation{ - ID: GenerateUUID(), - Namespace: e.Namespace, - Priority: e.Priority, - Type: e.Type, - TriggeredBy: EvalTriggerFailedFollowUp, - JobID: e.JobID, - JobModifyIndex: e.JobModifyIndex, - Status: EvalStatusPending, - Wait: wait, - PreviousEval: e.ID, - } -} - -// Plan is used to submit a commit plan for task allocations. These -// are submitted to the leader which verifies that resources have -// not been overcommitted before admiting the plan. -type Plan struct { - // EvalID is the evaluation ID this plan is associated with - EvalID string - - // EvalToken is used to prevent a split-brain processing of - // an evaluation. There should only be a single scheduler running - // an Eval at a time, but this could be violated after a leadership - // transition. This unique token is used to reject plans that are - // being submitted from a different leader. - EvalToken string - - // Priority is the priority of the upstream job - Priority int - - // AllAtOnce is used to control if incremental scheduling of task groups - // is allowed or if we must do a gang scheduling of the entire job. - // If this is false, a plan may be partially applied. Otherwise, the - // entire plan must be able to make progress. - AllAtOnce bool - - // Job is the parent job of all the allocations in the Plan. - // Since a Plan only involves a single Job, we can reduce the size - // of the plan by only including it once. - Job *Job - - // NodeUpdate contains all the allocations for each node. For each node, - // this is a list of the allocations to update to either stop or evict. - NodeUpdate map[string][]*Allocation - - // NodeAllocation contains all the allocations for each node. - // The evicts must be considered prior to the allocations. - NodeAllocation map[string][]*Allocation - - // Annotations contains annotations by the scheduler to be used by operators - // to understand the decisions made by the scheduler. - Annotations *PlanAnnotations - - // Deployment is the deployment created or updated by the scheduler that - // should be applied by the planner. - Deployment *Deployment - - // DeploymentUpdates is a set of status updates to apply to the given - // deployments. This allows the scheduler to cancel any unneeded deployment - // because the job is stopped or the update block is removed. - DeploymentUpdates []*DeploymentStatusUpdate -} - -// AppendUpdate marks the allocation for eviction. The clientStatus of the -// allocation may be optionally set by passing in a non-empty value. -func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) { - newAlloc := new(Allocation) - *newAlloc = *alloc - - // If the job is not set in the plan we are deregistering a job so we - // extract the job from the allocation. 
- if p.Job == nil && newAlloc.Job != nil { - p.Job = newAlloc.Job - } - - // Normalize the job - newAlloc.Job = nil - - // Strip the resources as it can be rebuilt. - newAlloc.Resources = nil - - newAlloc.DesiredStatus = desiredStatus - newAlloc.DesiredDescription = desiredDesc - - if clientStatus != "" { - newAlloc.ClientStatus = clientStatus - } - - node := alloc.NodeID - existing := p.NodeUpdate[node] - p.NodeUpdate[node] = append(existing, newAlloc) -} - -func (p *Plan) PopUpdate(alloc *Allocation) { - existing := p.NodeUpdate[alloc.NodeID] - n := len(existing) - if n > 0 && existing[n-1].ID == alloc.ID { - existing = existing[:n-1] - if len(existing) > 0 { - p.NodeUpdate[alloc.NodeID] = existing - } else { - delete(p.NodeUpdate, alloc.NodeID) - } - } -} - -func (p *Plan) AppendAlloc(alloc *Allocation) { - node := alloc.NodeID - existing := p.NodeAllocation[node] - p.NodeAllocation[node] = append(existing, alloc) -} - -// IsNoOp checks if this plan would do nothing -func (p *Plan) IsNoOp() bool { - return len(p.NodeUpdate) == 0 && - len(p.NodeAllocation) == 0 && - p.Deployment == nil && - len(p.DeploymentUpdates) == 0 -} - -// PlanResult is the result of a plan submitted to the leader. -type PlanResult struct { - // NodeUpdate contains all the updates that were committed. - NodeUpdate map[string][]*Allocation - - // NodeAllocation contains all the allocations that were committed. - NodeAllocation map[string][]*Allocation - - // Deployment is the deployment that was committed. - Deployment *Deployment - - // DeploymentUpdates is the set of deployment updates that were committed. - DeploymentUpdates []*DeploymentStatusUpdate - - // RefreshIndex is the index the worker should refresh state up to. - // This allows all evictions and allocations to be materialized. - // If any allocations were rejected due to stale data (node state, - // over committed) this can be used to force a worker refresh. - RefreshIndex uint64 - - // AllocIndex is the Raft index in which the evictions and - // allocations took place. This is used for the write index. - AllocIndex uint64 -} - -// IsNoOp checks if this plan result would do nothing -func (p *PlanResult) IsNoOp() bool { - return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0 && - len(p.DeploymentUpdates) == 0 && p.Deployment == nil -} - -// FullCommit is used to check if all the allocations in a plan -// were committed as part of the result. Returns if there was -// a match, and the number of expected and actual allocations. -func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) { - expected := 0 - actual := 0 - for name, allocList := range plan.NodeAllocation { - didAlloc, _ := p.NodeAllocation[name] - expected += len(allocList) - actual += len(didAlloc) - } - return actual == expected, expected, actual -} - -// PlanAnnotations holds annotations made by the scheduler to give further debug -// information to operators. -type PlanAnnotations struct { - // DesiredTGUpdates is the set of desired updates per task group. - DesiredTGUpdates map[string]*DesiredUpdates -} - -// DesiredUpdates is the set of changes the scheduler would like to make given -// sufficient resources and cluster capacity. 
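To make the plan workflow above concrete, here is a minimal sketch of how a scheduler might assemble and vet a plan before submitting it (illustrative only: `buildPlan` is not part of this patch, and the upstream `structs` import path is assumed):

```go
package example

import "github.com/hashicorp/nomad/nomad/structs"

// buildPlan turns an evaluation into a plan: allocations in stop are
// marked for stopping, allocations in place are added, and a nil
// result signals that there is nothing worth submitting to the leader.
func buildPlan(e *structs.Evaluation, job *structs.Job, stop, place []*structs.Allocation) *structs.Plan {
	p := e.MakePlan(job)
	for _, alloc := range stop {
		// An empty client status leaves the allocation's client state untouched.
		p.AppendUpdate(alloc, structs.AllocDesiredStatusStop, "no longer required", "")
	}
	for _, alloc := range place {
		p.AppendAlloc(alloc)
	}
	if p.IsNoOp() {
		return nil
	}
	return p
}
```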
-type DesiredUpdates struct { - Ignore uint64 - Place uint64 - Migrate uint64 - Stop uint64 - InPlaceUpdate uint64 - DestructiveUpdate uint64 - Canary uint64 -} - -func (d *DesiredUpdates) GoString() string { - return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)", - d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary) -} - -// msgpackHandle is a shared handle for encoding/decoding of structs -var MsgpackHandle = func() *codec.MsgpackHandle { - h := &codec.MsgpackHandle{RawToString: true} - - // Sets the default type for decoding a map into a nil interface{}. - // This is necessary in particular because we store the driver configs as a - // nil interface{}. - h.MapType = reflect.TypeOf(map[string]interface{}(nil)) - return h -}() - -var ( - // JsonHandle and JsonHandlePretty are the codec handles to JSON encode - // structs. The pretty handle will add indents for easier human consumption. - JsonHandle = &codec.JsonHandle{ - HTMLCharsAsIs: true, - } - JsonHandlePretty = &codec.JsonHandle{ - HTMLCharsAsIs: true, - Indent: 4, - } -) - -var HashiMsgpackHandle = func() *hcodec.MsgpackHandle { - h := &hcodec.MsgpackHandle{RawToString: true} - - // Sets the default type for decoding a map into a nil interface{}. - // This is necessary in particular because we store the driver configs as a - // nil interface{}. - h.MapType = reflect.TypeOf(map[string]interface{}(nil)) - return h -}() - -// Decode is used to decode a MsgPack encoded object -func Decode(buf []byte, out interface{}) error { - return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) -} - -// Encode is used to encode a MsgPack object with type prefix -func Encode(t MessageType, msg interface{}) ([]byte, error) { - var buf bytes.Buffer - buf.WriteByte(uint8(t)) - err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg) - return buf.Bytes(), err -} - -// KeyringResponse is a unified key response and can be used for install, -// remove, use, as well as listing key queries. -type KeyringResponse struct { - Messages map[string]string - Keys map[string]int - NumNodes int -} - -// KeyringRequest is request objects for serf key operations. -type KeyringRequest struct { - Key string -} - -// RecoverableError wraps an error and marks whether it is recoverable and could -// be retried or it is fatal. -type RecoverableError struct { - Err string - Recoverable bool -} - -// NewRecoverableError is used to wrap an error and mark it as recoverable or -// not. -func NewRecoverableError(e error, recoverable bool) error { - if e == nil { - return nil - } - - return &RecoverableError{ - Err: e.Error(), - Recoverable: recoverable, - } -} - -// WrapRecoverable wraps an existing error in a new RecoverableError with a new -// message. If the error was recoverable before the returned error is as well; -// otherwise it is unrecoverable. -func WrapRecoverable(msg string, err error) error { - return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)} -} - -func (r *RecoverableError) Error() string { - return r.Err -} - -func (r *RecoverableError) IsRecoverable() bool { - return r.Recoverable -} - -// Recoverable is an interface for errors to implement to indicate whether or -// not they are fatal or recoverable. -type Recoverable interface { - error - IsRecoverable() bool -} - -// IsRecoverable returns true if error is a RecoverableError with -// Recoverable=true. Otherwise false is returned. 
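The RecoverableError machinery above is consumed by callers roughly like this (a minimal sketch: `withRetries` is not part of this patch, and the upstream `structs` import path is assumed):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

// withRetries retries fn while it fails with a recoverable error and
// gives up immediately on a fatal one, preserving recoverability in
// the final wrapped error.
func withRetries(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if !structs.IsRecoverable(err) {
			return err
		}
	}
	return structs.WrapRecoverable(fmt.Sprintf("giving up after %d attempts: %v", attempts, err), err)
}
```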
-func IsRecoverable(e error) bool { - if re, ok := e.(Recoverable); ok { - return re.IsRecoverable() - } - return false -} - -// ACLPolicy is used to represent an ACL policy -type ACLPolicy struct { - Name string // Unique name - Description string // Human readable - Rules string // HCL or JSON format - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 -} - -// SetHash is used to compute and set the hash of the ACL policy -func (c *ACLPolicy) SetHash() []byte { - // Initialize a 256bit Blake2 hash (32 bytes) - hash, err := blake2b.New256(nil) - if err != nil { - panic(err) - } - - // Write all the user set fields - hash.Write([]byte(c.Name)) - hash.Write([]byte(c.Description)) - hash.Write([]byte(c.Rules)) - - // Finalize the hash - hashVal := hash.Sum(nil) - - // Set and return the hash - c.Hash = hashVal - return hashVal -} - -func (a *ACLPolicy) Stub() *ACLPolicyListStub { - return &ACLPolicyListStub{ - Name: a.Name, - Description: a.Description, - Hash: a.Hash, - CreateIndex: a.CreateIndex, - ModifyIndex: a.ModifyIndex, - } -} - -func (a *ACLPolicy) Validate() error { - var mErr multierror.Error - if !validPolicyName.MatchString(a.Name) { - err := fmt.Errorf("invalid name '%s'", a.Name) - mErr.Errors = append(mErr.Errors, err) - } - if _, err := acl.Parse(a.Rules); err != nil { - err = fmt.Errorf("failed to parse rules: %v", err) - mErr.Errors = append(mErr.Errors, err) - } - if len(a.Description) > maxPolicyDescriptionLength { - err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength) - mErr.Errors = append(mErr.Errors, err) - } - return mErr.ErrorOrNil() -} - -// ACLPolicyListStub is used to for listing ACL policies -type ACLPolicyListStub struct { - Name string - Description string - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 -} - -// ACLPolicyListRequest is used to request a list of policies -type ACLPolicyListRequest struct { - QueryOptions -} - -// ACLPolicySpecificRequest is used to query a specific policy -type ACLPolicySpecificRequest struct { - Name string - QueryOptions -} - -// ACLPolicySetRequest is used to query a set of policies -type ACLPolicySetRequest struct { - Names []string - QueryOptions -} - -// ACLPolicyListResponse is used for a list request -type ACLPolicyListResponse struct { - Policies []*ACLPolicyListStub - QueryMeta -} - -// SingleACLPolicyResponse is used to return a single policy -type SingleACLPolicyResponse struct { - Policy *ACLPolicy - QueryMeta -} - -// ACLPolicySetResponse is used to return a set of policies -type ACLPolicySetResponse struct { - Policies map[string]*ACLPolicy - QueryMeta -} - -// ACLPolicyDeleteRequest is used to delete a set of policies -type ACLPolicyDeleteRequest struct { - Names []string - WriteRequest -} - -// ACLPolicyUpsertRequest is used to upsert a set of policies -type ACLPolicyUpsertRequest struct { - Policies []*ACLPolicy - WriteRequest -} - -// ACLToken represents a client token which is used to Authenticate -type ACLToken struct { - AccessorID string // Public Accessor ID (UUID) - SecretID string // Secret ID, private (UUID) - Name string // Human friendly name - Type string // Client or Management - Policies []string // Policies this token ties to - Global bool // Global or Region local - Hash []byte - CreateTime time.Time // Time of creation - CreateIndex uint64 - ModifyIndex uint64 -} - -var ( - // AnonymousACLToken is used no SecretID is provided, and the - // request is made anonymously. 
- AnonymousACLToken = &ACLToken{ - AccessorID: "anonymous", - Name: "Anonymous Token", - Type: ACLClientToken, - Policies: []string{"anonymous"}, - Global: false, - } -) - -type ACLTokenListStub struct { - AccessorID string - Name string - Type string - Policies []string - Global bool - Hash []byte - CreateTime time.Time - CreateIndex uint64 - ModifyIndex uint64 -} - -// SetHash is used to compute and set the hash of the ACL token -func (a *ACLToken) SetHash() []byte { - // Initialize a 256bit Blake2 hash (32 bytes) - hash, err := blake2b.New256(nil) - if err != nil { - panic(err) - } - - // Write all the user set fields - hash.Write([]byte(a.Name)) - hash.Write([]byte(a.Type)) - for _, policyName := range a.Policies { - hash.Write([]byte(policyName)) - } - if a.Global { - hash.Write([]byte("global")) - } else { - hash.Write([]byte("local")) - } - - // Finalize the hash - hashVal := hash.Sum(nil) - - // Set and return the hash - a.Hash = hashVal - return hashVal -} - -func (a *ACLToken) Stub() *ACLTokenListStub { - return &ACLTokenListStub{ - AccessorID: a.AccessorID, - Name: a.Name, - Type: a.Type, - Policies: a.Policies, - Global: a.Global, - Hash: a.Hash, - CreateTime: a.CreateTime, - CreateIndex: a.CreateIndex, - ModifyIndex: a.ModifyIndex, - } -} - -// Validate is used to sanity check a token -func (a *ACLToken) Validate() error { - var mErr multierror.Error - if len(a.Name) > maxTokenNameLength { - mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long")) - } - switch a.Type { - case ACLClientToken: - if len(a.Policies) == 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies")) - } - case ACLManagementToken: - if len(a.Policies) != 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies")) - } - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management")) - } - return mErr.ErrorOrNil() -} - -// PolicySubset checks if a given set of policies is a subset of the token -func (a *ACLToken) PolicySubset(policies []string) bool { - // Hot-path the management tokens, superset of all policies. 
- if a.Type == ACLManagementToken { - return true - } - associatedPolicies := make(map[string]struct{}, len(a.Policies)) - for _, policy := range a.Policies { - associatedPolicies[policy] = struct{}{} - } - for _, policy := range policies { - if _, ok := associatedPolicies[policy]; !ok { - return false - } - } - return true -} - -// ACLTokenListRequest is used to request a list of tokens -type ACLTokenListRequest struct { - GlobalOnly bool - QueryOptions -} - -// ACLTokenSpecificRequest is used to query a specific token -type ACLTokenSpecificRequest struct { - AccessorID string - QueryOptions -} - -// ACLTokenSetRequest is used to query a set of tokens -type ACLTokenSetRequest struct { - AccessorIDS []string - QueryOptions -} - -// ACLTokenListResponse is used for a list request -type ACLTokenListResponse struct { - Tokens []*ACLTokenListStub - QueryMeta -} - -// SingleACLTokenResponse is used to return a single token -type SingleACLTokenResponse struct { - Token *ACLToken - QueryMeta -} - -// ACLTokenSetResponse is used to return a set of token -type ACLTokenSetResponse struct { - Tokens map[string]*ACLToken // Keyed by Accessor ID - QueryMeta -} - -// ResolveACLTokenRequest is used to resolve a specific token -type ResolveACLTokenRequest struct { - SecretID string - QueryOptions -} - -// ResolveACLTokenResponse is used to resolve a single token -type ResolveACLTokenResponse struct { - Token *ACLToken - QueryMeta -} - -// ACLTokenDeleteRequest is used to delete a set of tokens -type ACLTokenDeleteRequest struct { - AccessorIDs []string - WriteRequest -} - -// ACLTokenBootstrapRequest is used to bootstrap ACLs -type ACLTokenBootstrapRequest struct { - Token *ACLToken // Not client specifiable - ResetIndex uint64 // Reset index is used to clear the bootstrap token - WriteRequest -} - -// ACLTokenUpsertRequest is used to upsert a set of tokens -type ACLTokenUpsertRequest struct { - Tokens []*ACLToken - WriteRequest -} - -// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest -type ACLTokenUpsertResponse struct { - Tokens []*ACLToken - WriteMeta -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go deleted file mode 100644 index bdc324bb5..000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go +++ /dev/null @@ -1,3 +0,0 @@ -package structs - -//go:generate codecgen -d 100 -o structs.generated.go structs.go diff --git a/vendor/vendor.json b/vendor/vendor.json index 2551f9e61..d8a1887a1 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1147,28 +1147,28 @@ "revisionTime": "2017-09-14T15:46:24Z" }, { - "checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=", + "checksumSHA1": "euodRTxiXS6udU7N9xRCQL6YDCg=", "path": "github.com/hashicorp/nomad/api", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z" + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { "checksumSHA1": "Is7OvHxCEEkKpdQnW8olCxL0444=", "path": "github.com/hashicorp/nomad/api/contexts", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z" + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { - "checksumSHA1": "GpikwcF9oi5Rrs/58xDSfiMy/I8=", + "checksumSHA1": "DE+4s/X+r987Ia93s9633mGekzg=", "path": "github.com/hashicorp/nomad/helper", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - 
"revisionTime": "2017-09-20T19:48:06Z" + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { - "checksumSHA1": "hrzGvgMsH9p6MKOu3zYS8fooL3g=", - "path": "github.com/hashicorp/nomad/nomad/structs", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z" + "checksumSHA1": "mSCo/iZUEOSpeX5NsGZZzFMJqto=", + "path": "github.com/hashicorp/nomad/helper/uuid", + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", From 29ccc35dd5b3c7d31f7a9b67bf34803404533fb1 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Mon, 2 Oct 2017 13:47:03 -0400 Subject: [PATCH 16/52] fixing dependencies --- .../hashicorp/nomad/helper/uuid/uuid.go | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go diff --git a/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go b/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go new file mode 100644 index 000000000..145c81780 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go @@ -0,0 +1,21 @@ +package uuid + +import ( + crand "crypto/rand" + "fmt" +) + +// Generate is used to generate a random UUID +func Generate() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} From 3380fd647d04a566e9404bd43a86a33951082071 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 6 Oct 2017 16:03:06 +0100 Subject: [PATCH 17/52] Adding Nomad docs to the nav. Minor cosmetics fixes --- website/source/docs/secrets/nomad/index.html.md | 1 + website/source/layouts/api.erb | 3 +++ website/source/layouts/docs.erb | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index d5b87d107..3f11148ca 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -32,6 +32,7 @@ Successfully mounted 'nomad' at 'nomad'! For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this is discouraged for production deployments. 
+ ``` $ nomad acl bootstrap Accessor ID = 95a0ee55-eaa6-2c0a-a900-ed94c156754e diff --git a/website/source/layouts/api.erb b/website/source/layouts/api.erb index ffea6b0e4..26c394e96 100644 --- a/website/source/layouts/api.erb +++ b/website/source/layouts/api.erb @@ -19,6 +19,9 @@ > Consul + > + Nomad + > Cubbyhole diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 9481cf319..2707dd871 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -179,6 +179,10 @@ Consul + > + Nomad + + > Cubbyhole From 0fc65cabc7b153226f1fd5cbdb11c22dc2d36ad3 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Tue, 31 Oct 2017 19:11:24 +0000 Subject: [PATCH 18/52] Minor/Cosmetic fixes --- builtin/logical/nomad/backend_test.go | 11 +++++------ builtin/logical/nomad/path_config.go | 5 +---- website/source/docs/secrets/nomad/index.html.md | 5 ++--- website/source/layouts/api.erb | 6 +++--- website/source/layouts/docs.erb | 8 ++++---- 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index b64dc1bea..8f4758580 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -2,7 +2,6 @@ package nomad import ( "fmt" - "log" "os" "reflect" "testing" @@ -64,7 +63,7 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma t.Fatalf("err: %v", err) } nomadToken = aclbootstrap.SecretID - log.Printf("[WARN] Generated Master token: %s", nomadToken) + t.Log("[WARN] Generated Master token: %s", nomadToken) policy := &nomadapi.ACLPolicy{ Name: "test", Description: "test", @@ -143,7 +142,7 @@ func TestBackend_config_access(t *testing.T) { expected := map[string]interface{}{ "address": connData["address"].(string), - "scheme": "http", + "scheme": "https", } if !reflect.DeepEqual(expected, resp.Data) { t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) @@ -213,7 +212,7 @@ func TestBackend_renew_revoke(t *testing.T) { if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) } - log.Printf("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) + t.Log("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) // Build a client and verify that the credentials work nomadapiConfig := nomadapi.DefaultConfig() @@ -224,7 +223,7 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - log.Printf("[WARN] Verifying that the generated token works...") + t.Log("[WARN] Verifying that the generated token works...") _, err = client.Agent().Members, nil if err != nil { t.Fatal(err) @@ -256,7 +255,7 @@ func TestBackend_renew_revoke(t *testing.T) { Namespace: "default", } - log.Printf("[WARN] Verifying that the generated token does not exist...") + t.Log("[WARN] Verifying that the generated token does not exist...") _, _, err = mgmtclient.ACLTokens().Info(d.Accessor, q) if err == nil { t.Fatal("err: expected error") diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index d9e6dc128..bcd3e1547 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -20,10 +20,7 @@ func pathConfigAccess() *framework.Path { Type: framework.TypeString, Description: "URI scheme for the Nomad address", - // https would be a better default but Consul on its own - // defaults to HTTP access, and when HTTPS is enabled it - // disables HTTP, so there isn't really any harm done here. 
- Default: "http", + Default: "https", }, "token": &framework.FieldSchema{ diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index 3f11148ca..4201f98c6 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -17,8 +17,7 @@ API tokens dynamically based on pre-existing Nomad ACL policies. This page will show a quick start for this backend. For detailed documentation on every path, use `vault path-help` after mounting the backend. -~> **Version information** ACLs are only available on Nomad 0.7.0 and above, -which is currently in beta. +~> **Version information** ACLs are only available on Nomad 0.7.0 and above. ## Quick Start @@ -46,7 +45,7 @@ Create Index = 7 Modify Index = 7 ``` The suggested pattern is to generate a token specifically for Vault, following the -[Nomad ACL guide](https://www.consul.io/docs/agent/http/acl.html) +[Nomad ACL guide](https://www.nomadproject.io/guides/acl.html) Next, we must configure Vault to know how to contact Nomad. This is done by writing the access information: diff --git a/website/source/layouts/api.erb b/website/source/layouts/api.erb index 26c394e96..2150cd1ef 100644 --- a/website/source/layouts/api.erb +++ b/website/source/layouts/api.erb @@ -19,9 +19,6 @@ > Consul - > - Nomad - > Cubbyhole @@ -59,6 +56,9 @@ > Identity + > + Nomad + > PKI diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 2707dd871..03e9eb3bd 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -179,10 +179,6 @@ Consul - > - Nomad - - > Cubbyhole @@ -225,6 +221,10 @@ Identity + > + Nomad + + > PKI (Certificates) From d540985926d10bc1f85d1985031b8dcd3fcef27b Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Tue, 31 Oct 2017 20:56:56 +0000 Subject: [PATCH 19/52] Unifying Storage and API path in role --- builtin/logical/nomad/backend_test.go | 2 +- builtin/logical/nomad/path_roles.go | 12 ++++++------ builtin/logical/nomad/path_token.go | 2 +- website/source/api/secret/nomad/index.html.md | 18 +++++++++--------- .../source/docs/secrets/nomad/index.html.md | 4 ++-- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 8f4758580..d88b5cfca 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -178,7 +178,7 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - req.Path = "roles/test" + req.Path = "role/test" req.Data = map[string]interface{}{ "policy": []string{"policy"}, "lease": "6h", diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 53fb7119d..3e1e6841e 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -10,7 +10,7 @@ import ( func pathListRoles(b *backend) *framework.Path { return &framework.Path{ - Pattern: "roles/?$", + Pattern: "role/?$", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, @@ -20,7 +20,7 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles() *framework.Path { return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), + Pattern: "role/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ "name": &framework.FieldSchema{ Type: framework.TypeString, @@ -62,7 +62,7 @@ Defaults to 'client'.`, func (b *backend) pathRoleList( req 
*logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List("policy/") + entries, err := req.Storage.List("role/") if err != nil { return nil, err } @@ -74,7 +74,7 @@ func pathRolesRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - entry, err := req.Storage.Get("policy/" + name) + entry, err := req.Storage.Get("role/" + name) if err != nil { return nil, err } @@ -138,7 +138,7 @@ func pathRolesWrite( } } - entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{ + entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ Policy: policy, Lease: lease, TokenType: tokenType, @@ -158,7 +158,7 @@ func pathRolesWrite( func pathRolesDelete( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - if err := req.Storage.Delete("policy/" + name); err != nil { + if err := req.Storage.Delete("role/" + name); err != nil { return nil, err } return nil, nil diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index 1a1c66fde..b8f5efa35 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -29,7 +29,7 @@ func (b *backend) pathTokenRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - entry, err := req.Storage.Get("policy/" + name) + entry, err := req.Storage.Get("role/" + name) if err != nil { return nil, fmt.Errorf("error retrieving role: %s", err) } diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 9cdf59b97..d3379b285 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -59,7 +59,7 @@ updated attributes. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `POST` | `/nomad/roles/:name` | `204 (empty body)` | +| `POST` | `/nomad/role/:name` | `204 (empty body)` | ### Parameters @@ -95,7 +95,7 @@ $ curl \ --request POST \ --header "X-Vault-Token: ..." \ --data @payload.json \ - https://vault.rocks/v1/nomad/roles/monitoring + https://vault.rocks/v1/nomad/role/monitoring ``` ## Read Role @@ -105,7 +105,7 @@ If no role exists with that name, a 404 is returned. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `GET` | `/nomad/roles/:name` | `200 application/json` | +| `GET` | `/nomad/role/:name` | `200 application/json` | ### Parameters @@ -117,7 +117,7 @@ If no role exists with that name, a 404 is returned. ``` $ curl \ --header "X-Vault-Token: ..." \ - https://vault.rocks/v1/nomad/roles/monitoring + https://vault.rocks/v1/nomad/role/monitoring ``` ### Sample Response @@ -147,8 +147,8 @@ This endpoint lists all existing roles in the backend. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `LIST` | `/nomad/roles` | `200 application/json` | -| `GET` | `/nomad/roles?list=true` | `200 application/json` | +| `LIST` | `/nomad/role` | `200 application/json` | +| `GET` | `/nomad/role?list=true` | `200 application/json` | ### Sample Request @@ -156,7 +156,7 @@ This endpoint lists all existing roles in the backend. $ curl \ --header "X-Vault-Token: ..." 
\ --request LIST \ - https://vault.rocks/v1/nomad/roles + https://vault.rocks/v1/nomad/role ``` ### Sample Response @@ -185,7 +185,7 @@ not exist, this endpoint will still return a successful response. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `DELETE` | `/nomad/roles/:name` | `204 (empty body)` | +| `DELETE` | `/nomad/role/:name` | `204 (empty body)` | ### Parameters @@ -198,7 +198,7 @@ not exist, this endpoint will still return a successful response. $ curl \ --request DELETE \ --header "X-Vault-Token: ..." \ - https://vault.rocks/v1/nomad/roles/example-role + https://vault.rocks/v1/nomad/role/example-role ``` ## Generate Credential diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index 4201f98c6..754426762 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -67,8 +67,8 @@ to a set of policy names used to generate those credentials. For example, lets c an "monitoring" role that maps to a "readonly" policy: ``` -$ vault write nomad/roles/monitoring policy=readonly -Success! Data written to: nomad/roles/monitoring +$ vault write nomad/role/monitoring policy=readonly +Success! Data written to: nomad/role/monitoring ``` The backend expects either a single or a comma separated list of policy names. From afb5d123b9b526c3663c24056fb5c98864d05aaa Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Tue, 31 Oct 2017 21:12:14 +0000 Subject: [PATCH 20/52] Should return an error if trying create a management token with policies attached --- builtin/logical/nomad/path_roles.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 3e1e6841e..e4e745600 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -126,6 +126,11 @@ func pathRolesWrite( return logical.ErrorResponse( "policy cannot be empty when not using management tokens"), nil } + } else { + if len(policy) != 0 { + return logical.ErrorResponse( + "policy should be empty when using management tokens"), nil + } } var lease time.Duration From 3ce4da75ac13d68232abe6b07275a3cd42d0b4a3 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 07:36:14 +0000 Subject: [PATCH 21/52] tokenType can never be nil/empty string as there are default values --- builtin/logical/nomad/path_roles.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index e4e745600..3beec7175 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -87,10 +87,6 @@ func pathRolesRead( return nil, err } - if result.TokenType == "" { - result.TokenType = "client" - } - // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ From 5f748a121725078b8e31ad54adcc36d44d9f34c5 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 07:41:58 +0000 Subject: [PATCH 22/52] Ignoring userErr as it will be nil anyway --- builtin/logical/nomad/path_config.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index bcd3e1547..453fbbe63 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -57,13 +57,10 @@ func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { func 
pathConfigAccessRead( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - conf, userErr, intErr := readConfigAccess(req.Storage) + conf, _, intErr := readConfigAccess(req.Storage) if intErr != nil { return nil, intErr } - if userErr != nil { - return logical.ErrorResponse(userErr.Error()), nil - } if conf == nil { return nil, fmt.Errorf("no user error reported but nomad access configuration not found") } From 55dd69437aa9c255264391c20dcdf749d37335a9 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 07:50:17 +0000 Subject: [PATCH 23/52] Refactored config error to just have a single error exit path --- builtin/logical/nomad/path_config.go | 7 +++---- builtin/logical/nomad/secret_token.go | 9 +-------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index 453fbbe63..4b23f4819 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -42,9 +42,8 @@ func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { return nil, nil, err } if entry == nil { - return nil, fmt.Errorf( - "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint"), - nil + return nil, nil, fmt.Errorf( + "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint") } conf := &accessConfig{} @@ -62,7 +61,7 @@ func pathConfigAccessRead( return nil, intErr } if conf == nil { - return nil, fmt.Errorf("no user error reported but nomad access configuration not found") + return nil, fmt.Errorf("no user or internal error reported but nomad access configuration not found") } return &logical.Response{ diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index ad02b16fe..00df0fef7 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -41,14 +41,7 @@ func secretTokenRevoke( return nil, userErr } - tokenRaw, ok := req.Secret.InternalData["accessor_id"] - if !ok { - // We return nil here because this is a pre-0.5.3 problem and there is - // nothing we can do about it. We already can't revoke the lease - // properly if it has been renewed and this is documented pre-0.5.3 - // behavior with a security bulletin about it. 
-		return nil, nil
-	}
+	tokenRaw, _ := req.Secret.InternalData["accessor_id"]
 
 	_, err := c.ACLTokens().Delete(tokenRaw.(string), nil)
 	if err != nil {

From eb7a0c0e83a99da42e885e295bc6acc393da07c4 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Wed, 1 Nov 2017 08:49:31 +0000
Subject: [PATCH 24/52] Refactoring readConfigAccess to return a single type of error instead of two

---
 builtin/logical/nomad/client.go      |  5 +----
 builtin/logical/nomad/path_config.go | 14 +++++++-------
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/builtin/logical/nomad/client.go b/builtin/logical/nomad/client.go
index 2101d31dc..80879d76d 100644
--- a/builtin/logical/nomad/client.go
+++ b/builtin/logical/nomad/client.go
@@ -8,13 +8,10 @@ import (
 )
 
 func client(s logical.Storage) (*api.Client, error, error) {
-	conf, userErr, intErr := readConfigAccess(s)
+	conf, intErr := readConfigAccess(s)
 	if intErr != nil {
 		return nil, nil, intErr
 	}
-	if userErr != nil {
-		return nil, userErr, nil
-	}
 	if conf == nil {
 		return nil, nil, fmt.Errorf("no error received but no configuration found")
 	}
diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go
index 4b23f4819..4e9eb151f 100644
--- a/builtin/logical/nomad/path_config.go
+++ b/builtin/logical/nomad/path_config.go
@@ -36,27 +36,27 @@ func pathConfigAccess() *framework.Path {
 	}
 }
 
-func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) {
+func readConfigAccess(storage logical.Storage) (*accessConfig, error) {
 	entry, err := storage.Get("config/access")
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 	if entry == nil {
-		return nil, nil, fmt.Errorf(
-			"Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint")
+		return nil, fmt.Errorf(
+			"Access credentials for the backend itself haven't been configured. 
Please configure them at the '/config/access' endpoint") } conf := &accessConfig{} if err := entry.DecodeJSON(conf); err != nil { - return nil, nil, fmt.Errorf("error reading nomad access configuration: %s", err) + return nil, fmt.Errorf("error reading nomad access configuration: %s", err) } - return conf, nil, nil + return conf, nil } func pathConfigAccessRead( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - conf, _, intErr := readConfigAccess(req.Storage) + conf, intErr := readConfigAccess(req.Storage) if intErr != nil { return nil, intErr } From 4b572c064cc6e4ac88bcf8111314420047fd6c50 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 3 Nov 2017 07:19:49 +0000 Subject: [PATCH 25/52] Overhauling the client method and attaching it to the backend --- builtin/logical/nomad/backend.go | 53 +++++++++++++++++++++++++++ builtin/logical/nomad/client.go | 25 ------------- builtin/logical/nomad/path_token.go | 5 +-- builtin/logical/nomad/secret_token.go | 10 ++--- 4 files changed, 57 insertions(+), 36 deletions(-) delete mode 100644 builtin/logical/nomad/client.go diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 99386d095..ea507e08f 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -1,6 +1,10 @@ package nomad import ( + "fmt" + "sync" + + "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -34,4 +38,53 @@ func Backend() *backend { type backend struct { *framework.Backend + + client *api.Client + lock sync.RWMutex +} + +func (b *backend) Client(s logical.Storage) (*api.Client, error) { + + b.lock.RLock() + + // If we already have a client, return it + if b.client != nil { + b.lock.RUnlock() + return b.client, nil + } + + b.lock.RUnlock() + + conf, intErr := readConfigAccess(s) + if intErr != nil { + return nil, intErr + } + if conf == nil { + return nil, fmt.Errorf("no error received but no configuration found") + } + + nomadConf := api.DefaultConfig() + nomadConf.Address = conf.Address + nomadConf.SecretID = conf.Token + + b.lock.Lock() + defer b.lock.Unlock() + + // If the client was creted during the lock switch, return it + if b.client != nil { + return b.client, nil + } + var err error + b.client, err = api.NewClient(nomadConf) + if err != nil { + return nil, err + } + return b.client, nil +} + +func (b *backend) resetClient() { + b.lock.Lock() + defer b.lock.Unlock() + + b.client = nil } diff --git a/builtin/logical/nomad/client.go b/builtin/logical/nomad/client.go deleted file mode 100644 index 80879d76d..000000000 --- a/builtin/logical/nomad/client.go +++ /dev/null @@ -1,25 +0,0 @@ -package nomad - -import ( - "fmt" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/vault/logical" -) - -func client(s logical.Storage) (*api.Client, error, error) { - conf, intErr := readConfigAccess(s) - if intErr != nil { - return nil, nil, intErr - } - if conf == nil { - return nil, nil, fmt.Errorf("no error received but no configuration found") - } - - nomadConf := api.DefaultConfig() - nomadConf.Address = conf.Address - nomadConf.SecretID = conf.Token - - client, err := api.NewClient(nomadConf) - return client, nil, err -} diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index b8f5efa35..37e268f43 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -47,13 +47,10 @@ func (b *backend) pathTokenRead( } // Get the nomad client - c, 
userErr, intErr := client(req.Storage)
+	c, intErr := b.Client(req.Storage)
 	if intErr != nil {
 		return nil, intErr
 	}
-	if userErr != nil {
-		return logical.ErrorResponse(userErr.Error()), nil
-	}
 
 	// Generate a name for the token
 	tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano())
diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go
index 00df0fef7..5ba048a8e 100644
--- a/builtin/logical/nomad/secret_token.go
+++ b/builtin/logical/nomad/secret_token.go
@@ -20,7 +20,7 @@ func secretToken(b *backend) *framework.Secret {
 		},
 
 		Renew:  b.secretTokenRenew,
-		Revoke: secretTokenRevoke,
+		Revoke: b.secretTokenRevoke,
 	}
 }
 
@@ -30,16 +30,12 @@ func (b *backend) secretTokenRenew(
 
 	return framework.LeaseExtend(0, 0, b.System())(req, d)
 }
 
-func secretTokenRevoke(
+func (b *backend) secretTokenRevoke(
 	req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-	c, userErr, intErr := client(req.Storage)
+	c, intErr := b.Client(req.Storage)
 	if intErr != nil {
 		return nil, intErr
 	}
-	if userErr != nil {
-		// Returning logical.ErrorResponse from revocation function is risky
-		return nil, userErr
-	}
 
 	tokenRaw, _ := req.Secret.InternalData["accessor_id"]

From 783b38c9c41d8bd6cdad93a8b72f77cd9219c767 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Fri, 3 Nov 2017 07:25:47 +0000
Subject: [PATCH 26/52] Not storing the Nomad token as we have the accessor for administrative operations

---
 builtin/logical/nomad/path_token.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go
index 37e268f43..28a397858 100644
--- a/builtin/logical/nomad/path_token.go
+++ b/builtin/logical/nomad/path_token.go
@@ -71,7 +71,6 @@ func (b *backend) pathTokenRead(
 		"secret_id":   token.SecretID,
 		"accessor_id": token.AccessorID,
 	}, map[string]interface{}{
-		"secret_id":   token.SecretID,
 		"accessor_id": token.AccessorID,
 	})
 	s.Secret.TTL = result.Lease

From 6dc8edf09ffda95003242e4f129d126829922664 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Mon, 6 Nov 2017 14:28:30 +0000
Subject: [PATCH 27/52] Attaching secretToken to backend

---
 builtin/logical/nomad/backend.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go
index ea507e08f..94e3f4e7d 100644
--- a/builtin/logical/nomad/backend.go
+++ b/builtin/logical/nomad/backend.go
@@ -31,6 +31,7 @@ func Backend() *backend {
 			secretToken(&b),
 		},
 		BackendType: logical.TypeLogical,
+		Clean:       b.resetClient,
 	}
 
 	return &b

From c70bfff23a72125456595254020f61e4fd363615 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Mon, 6 Nov 2017 15:09:56 +0000
Subject: [PATCH 28/52] Refactored Lease into the Backend configuration

---
 builtin/logical/nomad/backend.go              | 19 +++
 .../{path_config.go => path_config_access.go} |  0
 builtin/logical/nomad/path_config_lease.go    | 83 +++++++++++++++
 builtin/logical/nomad/path_roles.go           | 27 +-----
 builtin/logical/nomad/path_token.go           |  1 -
 builtin/logical/nomad/secret_token.go         |  9 +-
 6 files changed, 113 insertions(+), 26 deletions(-)
 rename builtin/logical/nomad/{path_config.go => path_config_access.go} (100%)
 create mode 100644 builtin/logical/nomad/path_config_lease.go

diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go
index 94e3f4e7d..e346f827e 100644
--- a/builtin/logical/nomad/backend.go
+++ b/builtin/logical/nomad/backend.go
@@ -22,6 +22,7 @@ func Backend() *backend {
 	b.Backend = &framework.Backend{
 		Paths: 
[]*framework.Path{ pathConfigAccess(), + pathConfigLease(&b), pathListRoles(&b), pathRoles(), pathToken(&b), @@ -89,3 +90,21 @@ func (b *backend) resetClient() { b.client = nil } + +// Lease returns the lease information +func (b *backend) Lease(s logical.Storage) (*configLease, error) { + entry, err := s.Get("config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config_access.go similarity index 100% rename from builtin/logical/nomad/path_config.go rename to builtin/logical/nomad/path_config_access.go diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go new file mode 100644 index 000000000..8625aa6fe --- /dev/null +++ b/builtin/logical/nomad/path_config_lease.go @@ -0,0 +1,83 @@ +package nomad + +import ( + "time" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + Fields: map[string]*framework.FieldSchema{ + "ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: "Duration before which the issued token needs renewal", + }, + "max_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: `Duration after which the issued token should not be allowed to be renewed`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseUpdate, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +// Sets the lease configuration parameters +func (b *backend) pathLeaseUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + TTL: time.Second * time.Duration(d.Get("ttl").(int)), + MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil +} + +// Returns the lease configuration parameters +func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + lease, err := b.Lease(req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + return nil, nil + } + + lease.TTL = lease.TTL / time.Second + lease.MaxTTL = lease.MaxTTL / time.Second + + return &logical.Response{ + Data: structs.New(lease).Map(), + }, nil +} + +// Lease configuration information for the secrets issued by this backend +type configLease struct { + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` + MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"` +} + +var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated tokens" + +var pathConfigLeaseHelpDesc = ` +Sets the ttl and max_ttl values for the secrets to be issued by this backend. +Both ttl and max_ttl take in an integer number of seconds as input as well as +inputs like "1h". 
+` diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 3beec7175..f1c183490 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,9 +1,6 @@ package nomad import ( - "fmt" - "time" - "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -45,11 +42,6 @@ or 'management'. If a 'management' token, the "policy" parameter is not required. Defaults to 'client'.`, }, - - "lease": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "Lease time of the role.", - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -90,7 +82,6 @@ func pathRolesRead( // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ - "lease": result.Lease.String(), "token_type": result.TokenType, "global": result.Global, }, @@ -129,19 +120,8 @@ func pathRolesWrite( } } - var lease time.Duration - leaseParam := d.Get("lease").(string) - if leaseParam != "" { - lease, err = time.ParseDuration(leaseParam) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "error parsing given lease of %s: %s", leaseParam, err)), nil - } - } - entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ Policy: policy, - Lease: lease, TokenType: tokenType, Global: global, }) @@ -166,8 +146,7 @@ func pathRolesDelete( } type roleConfig struct { - Policy []string `json:"policy"` - Lease time.Duration `json:"lease"` - TokenType string `json:"token_type"` - Global bool `json:"global"` + Policy []string `json:"policy"` + TokenType string `json:"token_type"` + Global bool `json:"global"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index 28a397858..ac69e0d0c 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -73,7 +73,6 @@ func (b *backend) pathTokenRead( }, map[string]interface{}{ "accessor_id": token.AccessorID, }) - s.Secret.TTL = result.Lease return s, nil } diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 5ba048a8e..d3455cd68 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -26,8 +26,15 @@ func secretToken(b *backend) *framework.Secret { func (b *backend) secretTokenRenew( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + lease, err := b.Lease(req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } - return framework.LeaseExtend(0, 0, b.System())(req, d) + return framework.LeaseExtend(lease.TTL, lease.MaxTTL, b.System())(req, d) } func (b *backend) secretTokenRevoke( From 5a317a1a32904dc6d65797f8ab73b511c5f2e587 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Mon, 6 Nov 2017 15:13:50 +0000 Subject: [PATCH 29/52] Updated documentation --- website/source/api/secret/nomad/index.html.md | 33 +++++++++++++++++++ .../source/docs/secrets/nomad/index.html.md | 9 +++++ 2 files changed, 42 insertions(+) diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index d3379b285..1b46c4ece 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -52,6 +52,39 @@ $ curl \ https://vault.rocks/v1/nomad/config/access ``` +## Configure Lease + +This endpoint configures the lease settings for generated tokens. 
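As a rough illustration, assuming the backend is mounted at `nomad/` and that `VAULT_ADDR` and `VAULT_TOKEN` are set in the environment, the same settings could also be written programmatically with the `github.com/hashicorp/vault/api` client:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// NewClient picks up VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// ttl and max_ttl are given in seconds, matching the parameters below.
	_, err = client.Logical().Write("nomad/config/lease", map[string]interface{}{
		"ttl":     1800,
		"max_ttl": 3600,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

This is equivalent to the `curl` request shown below; both simply POST the `ttl` and `max_ttl` parameters to `nomad/config/lease`.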
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/nomad/config/lease` | `204 (empty body)` | + +### Parameters + +- `ttl` `(int: 0)` – Specifies the lease ttl provided in seconds. + +- `max_ttl` `(int: 0)` – Specifies the maximum ttl provided in seconds. + +### Sample Payload + +```json +{ + "ttl": 1800, + "max_ttl": 3600 +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/nomad/config/lease +``` + ## Create/Update Role This endpoint creates or updates the Nomad role definition in Vault. If the role does not exist, it will be created. If the role already exists, it will receive diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index 754426762..aa987e779 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -28,6 +28,15 @@ Unlike the `generic` backend, the `nomad` backend is not mounted by default. $ vault mount nomad Successfully mounted 'nomad' at 'nomad'! ``` + +Optionally, we can configure the lease settings for credentials generated +by Vault. This is done by writing to the `config/lease` key: + +``` +$ vault write nomad/config/lease ttl=3600 max_ttl=86400 +Success! Data written to: nomad/config/lease +``` + For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this is discouraged for production deployments. From de8c0dce997e4c4901be0a55d6de064ae05e92c5 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Mon, 6 Nov 2017 16:34:20 -0500 Subject: [PATCH 30/52] minor cleanup --- builtin/logical/nomad/backend.go | 54 ++++--------------- builtin/logical/nomad/path_config_access.go | 14 ++--- builtin/logical/nomad/path_config_lease.go | 17 +++--- builtin/logical/nomad/path_roles.go | 45 +++++++--------- builtin/logical/nomad/path_token.go | 10 ++-- builtin/logical/nomad/secret_token.go | 7 ++- website/source/api/secret/nomad/index.html.md | 2 +- 7 files changed, 50 insertions(+), 99 deletions(-) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index e346f827e..9ae2d7970 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -1,9 +1,6 @@ package nomad import ( - "fmt" - "sync" - "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -21,10 +18,10 @@ func Backend() *backend { var b backend b.Backend = &framework.Backend{ Paths: []*framework.Path{ - pathConfigAccess(), + pathConfigAccess(&b), pathConfigLease(&b), pathListRoles(&b), - pathRoles(), + pathRoles(&b), pathToken(&b), }, @@ -32,7 +29,6 @@ func Backend() *backend { secretToken(&b), }, BackendType: logical.TypeLogical, - Clean: b.resetClient, } return &b @@ -40,59 +36,27 @@ func Backend() *backend { type backend struct { *framework.Backend - - client *api.Client - lock sync.RWMutex } -func (b *backend) Client(s logical.Storage) (*api.Client, error) { - - b.lock.RLock() - - // If we already have a client, return it - if b.client != nil { - b.lock.RUnlock() - return b.client, nil - } - - b.lock.RUnlock() - - conf, intErr := readConfigAccess(s) - if intErr != nil { - return nil, intErr - } - if conf == nil { - return nil, fmt.Errorf("no error received but no configuration found") +func (b *backend) client(s 
logical.Storage) (*api.Client, error) { + conf, err := b.readConfigAccess(s) + if err != nil { + return nil, err } nomadConf := api.DefaultConfig() nomadConf.Address = conf.Address nomadConf.SecretID = conf.Token - b.lock.Lock() - defer b.lock.Unlock() - - // If the client was creted during the lock switch, return it - if b.client != nil { - return b.client, nil - } - var err error - b.client, err = api.NewClient(nomadConf) + client, err := api.NewClient(nomadConf) if err != nil { return nil, err } - return b.client, nil -} - -func (b *backend) resetClient() { - b.lock.Lock() - defer b.lock.Unlock() - - b.client = nil + return client, nil } // Lease returns the lease information -func (b *backend) Lease(s logical.Storage) (*configLease, error) { +func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) { entry, err := s.Get("config/lease") if err != nil { return nil, err diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 4e9eb151f..441d80d75 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/vault/logical/framework" ) -func pathConfigAccess() *framework.Path { +func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", Fields: map[string]*framework.FieldSchema{ @@ -30,13 +30,13 @@ func pathConfigAccess() *framework.Path { }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: pathConfigAccessRead, - logical.UpdateOperation: pathConfigAccessWrite, + logical.ReadOperation: b.pathConfigAccessRead, + logical.UpdateOperation: b.pathConfigAccessWrite, }, } } -func readConfigAccess(storage logical.Storage) (*accessConfig, error) { +func (b *backend) readConfigAccess(storage logical.Storage) (*accessConfig, error) { entry, err := storage.Get("config/access") if err != nil { return nil, err @@ -54,9 +54,9 @@ func readConfigAccess(storage logical.Storage) (*accessConfig, error) { return conf, nil } -func pathConfigAccessRead( +func (b *backend) pathConfigAccessRead( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - conf, intErr := readConfigAccess(req.Storage) + conf, intErr := b.readConfigAccess(req.Storage) if intErr != nil { return nil, intErr } @@ -72,7 +72,7 @@ func pathConfigAccessRead( }, nil } -func pathConfigAccessWrite( +func (b *backend) pathConfigAccessWrite( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { entry, err := logical.StorageEntryJSON("config/access", accessConfig{ Address: data.Get("address").(string), diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index 8625aa6fe..af8c86429 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -3,7 +3,6 @@ package nomad import ( "time" - "github.com/fatih/structs" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -14,12 +13,10 @@ func pathConfigLease(b *backend) *framework.Path { Fields: map[string]*framework.FieldSchema{ "ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Default: 0, Description: "Duration before which the issued token needs renewal", }, "max_ttl": &framework.FieldSchema{ Type: framework.TypeDurationSecond, - Default: 0, Description: `Duration after which the issued token should not be allowed to be renewed`, }, }, @@ -52,7 +49,7 @@ func (b *backend) 
pathLeaseUpdate(req *logical.Request, d *framework.FieldData) // Returns the lease configuration parameters func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - lease, err := b.Lease(req.Storage) + lease, err := b.LeaseConfig(req.Storage) if err != nil { return nil, err } @@ -60,18 +57,18 @@ func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) return nil, nil } - lease.TTL = lease.TTL / time.Second - lease.MaxTTL = lease.MaxTTL / time.Second - return &logical.Response{ - Data: structs.New(lease).Map(), + Data: map[string]interface{}{ + "ttl": lease.TTL.Seconds(), + "max_ttl": lease.MaxTTL.Seconds(), + }, }, nil } // Lease configuration information for the secrets issued by this backend type configLease struct { - TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` - MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"` + TTL time.Duration `json:"ttl" mapstructure:"ttl"` + MaxTTL time.Duration `json:"max_ttl" mapstructure:"max_ttl"` } var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated tokens" diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index f1c183490..b977c8192 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -15,7 +15,7 @@ func pathListRoles(b *backend) *framework.Path { } } -func pathRoles() *framework.Path { +func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ @@ -34,7 +34,7 @@ func pathRoles() *framework.Path { Description: "Policy name as previously created in Nomad. Required", }, - "token_type": &framework.FieldSchema{ + "type": &framework.FieldSchema{ Type: framework.TypeString, Default: "client", Description: `Which type of token to create: 'client' @@ -45,9 +45,9 @@ Defaults to 'client'.`, }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: pathRolesRead, - logical.UpdateOperation: pathRolesWrite, - logical.DeleteOperation: pathRolesDelete, + logical.ReadOperation: b.pathRolesRead, + logical.UpdateOperation: b.pathRolesWrite, + logical.DeleteOperation: b.pathRolesDelete, }, } } @@ -62,7 +62,7 @@ func (b *backend) pathRoleList( return logical.ListResponse(entries), nil } -func pathRolesRead( +func (b *backend) pathRolesRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) @@ -82,8 +82,8 @@ func pathRolesRead( // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ - "token_type": result.TokenType, - "global": result.Global, + "type": result.TokenType, + "global": result.Global, }, } if len(result.Policy) != 0 { @@ -92,32 +92,27 @@ func pathRolesRead( return resp, nil } -func pathRolesWrite( +func (b *backend) pathRolesWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - tokenType := d.Get("token_type").(string) - - switch tokenType { - case "client": - case "management": - default: - return logical.ErrorResponse( - "token_type must be \"client\" or \"management\""), nil - } - + tokenType := d.Get("type").(string) name := d.Get("name").(string) global := d.Get("global").(bool) policy := d.Get("policy").([]string) - var err error - if tokenType != "management" { + + switch tokenType { + case "client": if len(policy) == 0 { return logical.ErrorResponse( - "policy cannot be empty when not using 
management tokens"), nil + "policy cannot be empty when using client tokens"), nil } - } else { + case "management": if len(policy) != 0 { return logical.ErrorResponse( "policy should be empty when using management tokens"), nil } + default: + return logical.ErrorResponse( + "type must be \"client\" or \"management\""), nil } entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ @@ -136,7 +131,7 @@ func pathRolesWrite( return nil, nil } -func pathRolesDelete( +func (b *backend) pathRolesDelete( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) if err := req.Storage.Delete("role/" + name); err != nil { @@ -147,6 +142,6 @@ func pathRolesDelete( type roleConfig struct { Policy []string `json:"policy"` - TokenType string `json:"token_type"` + TokenType string `json:"type"` Global bool `json:"global"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index ac69e0d0c..65ecca48c 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -42,14 +42,10 @@ func (b *backend) pathTokenRead( return nil, err } - if result.TokenType == "" { - result.TokenType = "client" - } - // Get the nomad client - c, intErr := b.Client(req.Storage) - if intErr != nil { - return nil, intErr + c, err := b.client(req.Storage) + if err != nil { + return nil, err } // Generate a name for the token diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index d3455cd68..79f2d7d36 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -26,7 +26,7 @@ func secretToken(b *backend) *framework.Secret { func (b *backend) secretTokenRenew( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - lease, err := b.Lease(req.Storage) + lease, err := b.LeaseConfig(req.Storage) if err != nil { return nil, err } @@ -39,13 +39,12 @@ func (b *backend) secretTokenRenew( func (b *backend) secretTokenRevoke( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - c, intErr := b.Client(req.Storage) + c, intErr := b.client(req.Storage) if intErr != nil { return nil, intErr } - tokenRaw, _ := req.Secret.InternalData["accessor_id"] - + tokenRaw := req.Secret.InternalData["accessor_id"] _, err := c.ACLTokens().Delete(tokenRaw.(string), nil) if err != nil { return nil, err diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 1b46c4ece..a79db8389 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -108,7 +108,7 @@ updated attributes. - `global` `(bool: "")` – Specifies if the token should be global, as defined in the [Nomad Documentation](https://www.nomadproject.io/guides/acl.html#acl-tokens). ma -- `token_type` `(string: "client")` - Specifies the type of token to create when +- `type` `(string: "client")` - Specifies the type of token to create when using this role. Valid values are `"client"` or `"management"`. 
### Sample Payload From 1b387f75e35a6c73ad6cc0a5384eccfad3ee9fa1 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Mon, 6 Nov 2017 16:36:37 -0500 Subject: [PATCH 31/52] minor cleanup --- builtin/logical/nomad/path_config_access.go | 6 +++--- builtin/logical/nomad/secret_token.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 441d80d75..397cb2bed 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -56,9 +56,9 @@ func (b *backend) readConfigAccess(storage logical.Storage) (*accessConfig, erro func (b *backend) pathConfigAccessRead( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - conf, intErr := b.readConfigAccess(req.Storage) - if intErr != nil { - return nil, intErr + conf, err := b.readConfigAccess(req.Storage) + if err != nil { + return nil, err } if conf == nil { return nil, fmt.Errorf("no user or internal error reported but nomad access configuration not found") diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 79f2d7d36..7e22fce08 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -39,13 +39,13 @@ func (b *backend) secretTokenRenew( func (b *backend) secretTokenRevoke( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - c, intErr := b.client(req.Storage) - if intErr != nil { - return nil, intErr + c, err := b.client(req.Storage) + if err != nil { + return nil, err } tokenRaw := req.Secret.InternalData["accessor_id"] - _, err := c.ACLTokens().Delete(tokenRaw.(string), nil) + _, err = c.ACLTokens().Delete(tokenRaw.(string), nil) if err != nil { return nil, err } From 210fe50b68ac310093ce10e843cbcca95bd59d95 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Tue, 7 Nov 2017 09:58:19 -0500 Subject: [PATCH 32/52] adding ttl to secret, refactoring for consistency --- builtin/logical/nomad/backend.go | 2 +- .../{path_token.go => path_creds_create.go} | 25 +++++++------ builtin/logical/nomad/path_roles.go | 35 +++++++++++++------ 3 files changed, 40 insertions(+), 22 deletions(-) rename builtin/logical/nomad/{path_token.go => path_creds_create.go} (74%) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 9ae2d7970..736810215 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -22,7 +22,7 @@ func Backend() *backend { pathConfigLease(&b), pathListRoles(&b), pathRoles(&b), - pathToken(&b), + pathCredsCreate(&b), }, Secrets: []*framework.Secret{ diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_creds_create.go similarity index 74% rename from builtin/logical/nomad/path_token.go rename to builtin/logical/nomad/path_creds_create.go index 65ecca48c..cad5020d9 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/vault/logical/framework" ) -func pathToken(b *backend) *framework.Path { +func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ @@ -29,18 +29,22 @@ func (b *backend) pathTokenRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - entry, err := req.Storage.Get("role/" + name) + role, err := 
b.Role(req.Storage, name) if err != nil { return nil, fmt.Errorf("error retrieving role: %s", err) } - if entry == nil { + if role == nil { return logical.ErrorResponse(fmt.Sprintf("Role '%s' not found", name)), nil } - var result roleConfig - if err := entry.DecodeJSON(&result); err != nil { + // Determine if we have a lease configuration + leaseConfig, err := b.LeaseConfig(req.Storage) + if err != nil { return nil, err } + if leaseConfig == nil { + leaseConfig = &configLease{} + } // Get the nomad client c, err := b.client(req.Storage) @@ -54,21 +58,22 @@ func (b *backend) pathTokenRead( // Create it token, _, err := c.ACLTokens().Create(&api.ACLToken{ Name: tokenName, - Type: result.TokenType, - Policies: result.Policy, - Global: result.Global, + Type: role.TokenType, + Policies: role.Policy, + Global: role.Global, }, nil) if err != nil { return logical.ErrorResponse(err.Error()), nil } // Use the helper to create the secret - s := b.Secret(SecretTokenType).Response(map[string]interface{}{ + resp := b.Secret(SecretTokenType).Response(map[string]interface{}{ "secret_id": token.SecretID, "accessor_id": token.AccessorID, }, map[string]interface{}{ "accessor_id": token.AccessorID, }) + resp.Secret.TTL = leaseConfig.TTL - return s, nil + return resp, nil } diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index b977c8192..2b41191b7 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,6 +1,8 @@ package nomad import ( + "fmt" + "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -52,6 +54,22 @@ Defaults to 'client'.`, } } +func (b *backend) Role(storage logical.Storage, name string) (*roleConfig, error) { + entry, err := storage.Get("role/" + name) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %s", err) + } + if entry == nil { + return nil, nil + } + + var result roleConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + return &result, nil +} + func (b *backend) pathRoleList( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { entries, err := req.Storage.List("role/") @@ -66,28 +84,23 @@ func (b *backend) pathRolesRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - entry, err := req.Storage.Get("role/" + name) + role, err := b.Role(req.Storage, name) if err != nil { return nil, err } - if entry == nil { + if role == nil { return nil, nil } - var result roleConfig - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ - "type": result.TokenType, - "global": result.Global, + "type": role.TokenType, + "global": role.Global, }, } - if len(result.Policy) != 0 { - resp.Data["policy"] = result.Policy + if len(role.Policy) > 0 { + resp.Data["policy"] = role.Policy } return resp, nil } From aab72464d637ceb52ff8b559a934da098b35938f Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 10:29:39 +0000 Subject: [PATCH 33/52] Removing legacy field scheme that belonged to the Consul API --- builtin/logical/nomad/backend_test.go | 1 - builtin/logical/nomad/path_config_access.go | 10 ---------- 2 files changed, 11 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index d88b5cfca..a7e0b632f 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ 
-142,7 +142,6 @@ func TestBackend_config_access(t *testing.T) { expected := map[string]interface{}{ "address": connData["address"].(string), - "scheme": "https", } if !reflect.DeepEqual(expected, resp.Data) { t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 397cb2bed..1b827dae6 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -16,13 +16,6 @@ func pathConfigAccess(b *backend) *framework.Path { Description: "Nomad server address", }, - "scheme": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "URI scheme for the Nomad address", - - Default: "https", - }, - "token": &framework.FieldSchema{ Type: framework.TypeString, Description: "Token for API calls", @@ -67,7 +60,6 @@ func (b *backend) pathConfigAccessRead( return &logical.Response{ Data: map[string]interface{}{ "address": conf.Address, - "scheme": conf.Scheme, }, }, nil } @@ -76,7 +68,6 @@ func (b *backend) pathConfigAccessWrite( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { entry, err := logical.StorageEntryJSON("config/access", accessConfig{ Address: data.Get("address").(string), - Scheme: data.Get("scheme").(string), Token: data.Get("token").(string), }) if err != nil { @@ -92,6 +83,5 @@ func (b *backend) pathConfigAccessWrite( type accessConfig struct { Address string `json:"address"` - Scheme string `json:"scheme"` Token string `json:"token"` } From fc81d8a07c6ee4fb291f5b6067555c76e154ba54 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 10:36:34 +0000 Subject: [PATCH 34/52] Validating that Address and Token are provided in path_config_access.go --- builtin/logical/nomad/path_config_access.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 1b827dae6..f45826c96 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -66,9 +66,17 @@ func (b *backend) pathConfigAccessRead( func (b *backend) pathConfigAccessWrite( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + address := data.Get("address").(string) + if address == "" { + return logical.ErrorResponse("missing nomad server address"), nil + } + token := data.Get("token").(string) + if token == "" { + return logical.ErrorResponse("missing nomad management token"), nil + } entry, err := logical.StorageEntryJSON("config/access", accessConfig{ - Address: data.Get("address").(string), - Token: data.Get("token").(string), + Address: address, + Token: token, }) if err != nil { return nil, err From 34b5919931a40d17e6798069d0b7adf874d50c4e Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 10:44:40 +0000 Subject: [PATCH 35/52] Updating descriptions, defaults for roles --- builtin/logical/nomad/path_roles.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 2b41191b7..ef2efd8e1 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -28,12 +28,13 @@ func pathRoles(b *backend) *framework.Path { "policy": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, - Description: "Policy name as previously created in Nomad. 
Required", + Description: "Comma separated list of policies as previously created in Nomad. Required", }, "global": &framework.FieldSchema{ Type: framework.TypeBool, - Description: "Policy name as previously created in Nomad. Required", + Default: false, + Description: "Boolean value describing if the token should be global or not. Defaults to false", }, "type": &framework.FieldSchema{ @@ -97,11 +98,9 @@ func (b *backend) pathRolesRead( Data: map[string]interface{}{ "type": role.TokenType, "global": role.Global, + "policy": role.Policy, }, } - if len(role.Policy) > 0 { - resp.Data["policy"] = role.Policy - } return resp, nil } From 604ead3a3711a23643985ac8d244cf1e7fa5d902 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 10:48:55 +0000 Subject: [PATCH 36/52] Renaming tokenRaw to accessorIDRaw to avoid confusion, as the token is not being used for revoking itself --- builtin/logical/nomad/secret_token.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 7e22fce08..7052232f0 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -44,8 +44,8 @@ func (b *backend) secretTokenRevoke( return nil, err } - tokenRaw := req.Secret.InternalData["accessor_id"] - _, err = c.ACLTokens().Delete(tokenRaw.(string), nil) + accessorIDRaw := req.Secret.InternalData["accessor_id"] + _, err = c.ACLTokens().Delete(accessorIDRaw.(string), nil) if err != nil { return nil, err } From e2be4bfd74c8d91ce39d2a04987426c47842f51a Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 10:58:02 +0000 Subject: [PATCH 37/52] Sanitizing error outputs --- builtin/logical/nomad/path_creds_create.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index cad5020d9..ccca87f5a 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -31,10 +31,10 @@ func (b *backend) pathTokenRead( role, err := b.Role(req.Storage, name) if err != nil { - return nil, fmt.Errorf("error retrieving role: %s", err) + return nil, fmt.Errorf("error retrieving role: %v", err) } if role == nil { - return logical.ErrorResponse(fmt.Sprintf("Role '%s' not found", name)), nil + return logical.ErrorResponse(fmt.Sprintf("Role '%q' not found", name)), nil } // Determine if we have a lease configuration @@ -53,7 +53,7 @@ func (b *backend) pathTokenRead( } // Generate a name for the token - tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano()) + tokenName := fmt.Sprintf("Vault-%s-%s-%d", name, req.DisplayName, time.Now().UnixNano()) // Create it token, _, err := c.ACLTokens().Create(&api.ACLToken{ @@ -63,7 +63,7 @@ func (b *backend) pathTokenRead( Global: role.Global, }, nil) if err != nil { - return logical.ErrorResponse(err.Error()), nil + return nil, err } // Use the helper to create the secret From 4f91a71c293fa2a2b9a77d0dbdc07ccc67702119 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 11:01:31 +0000 Subject: [PATCH 38/52] Return error before creating a client if conf is nil --- builtin/logical/nomad/backend.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 736810215..1a3a0b16f 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -44,6 +44,10 @@ 
func (b *backend) client(s logical.Storage) (*api.Client, error) { return nil, err } + if conf == nil { + return nil, err + } + nomadConf := api.DefaultConfig() nomadConf.Address = conf.Address nomadConf.SecretID = conf.Token From 2a4f63e4a5b7ce276d5ddfa7e66af02c510af417 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 11:07:17 +0000 Subject: [PATCH 39/52] Moving LeaseConfig function to path_config_lease.go Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/backend.go | 18 ------------------ builtin/logical/nomad/path_config_lease.go | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 1a3a0b16f..cbfd8be49 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -58,21 +58,3 @@ func (b *backend) client(s logical.Storage) (*api.Client, error) { } return client, nil } - -// Lease returns the lease information -func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) { - entry, err := s.Get("config/lease") - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result configLease - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index af8c86429..c13a88f4e 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -65,6 +65,24 @@ func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) }, nil } +// Lease returns the lease information +func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) { + entry, err := s.Get("config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + // Lease configuration information for the secrets issued by this backend type configLease struct { TTL time.Duration `json:"ttl" mapstructure:"ttl"` From 031f244922b3db725ab95be29594ac217d80cb25 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 11:15:54 +0000 Subject: [PATCH 40/52] Returning nil config if is actually nil, and catching the error before creating the client in backend.go Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/path_config_access.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index f45826c96..8321e62ca 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -35,8 +35,7 @@ func (b *backend) readConfigAccess(storage logical.Storage) (*accessConfig, erro return nil, err } if entry == nil { - return nil, fmt.Errorf( - "Access credentials for the backend itself haven't been configured. 
Please configure them at the '/config/access' endpoint") + return nil, nil } conf := &accessConfig{} @@ -54,7 +53,7 @@ func (b *backend) pathConfigAccessRead( return nil, err } if conf == nil { - return nil, fmt.Errorf("no user or internal error reported but nomad access configuration not found") + return nil, nil } return &logical.Response{ From 89466815ba0c18c0b05c434ce4d69fcf6707096c Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 15:18:03 +0000 Subject: [PATCH 41/52] Return an error if accesor_id is nil Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/secret_token.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 7052232f0..492183d45 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -1,6 +1,8 @@ package nomad import ( + "fmt" + "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -45,6 +47,10 @@ func (b *backend) secretTokenRevoke( } accessorIDRaw := req.Secret.InternalData["accessor_id"] + if accessorIDRaw == nil { + return nil, fmt.Errorf("accessor id is missing on the lease") + } + _, err = c.ACLTokens().Delete(accessorIDRaw.(string), nil) if err != nil { return nil, err From 62fe10204a522a36417f9eeeb4763e340291fdc6 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 15:58:39 +0000 Subject: [PATCH 42/52] Refactoring check for empty accessor as per Vishals suggestion Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/secret_token.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 492183d45..6e892659e 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -46,9 +46,9 @@ func (b *backend) secretTokenRevoke( return nil, err } - accessorIDRaw := req.Secret.InternalData["accessor_id"] - if accessorIDRaw == nil { - return nil, fmt.Errorf("accessor id is missing on the lease") + accessorIDRaw, ok := req.Secret.InternalData["accessor_id"] + if !ok { + return nil, fmt.Errorf("accessor_id is missing on the lease") } _, err = c.ACLTokens().Delete(accessorIDRaw.(string), nil) From 239a9a99857c45a781205c485a0ae8fd66d83c3d Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 16:19:31 +0000 Subject: [PATCH 43/52] %q quotes automatically Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/path_creds_create.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index ccca87f5a..bfdfdf35c 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -34,7 +34,7 @@ func (b *backend) pathTokenRead( return nil, fmt.Errorf("error retrieving role: %v", err) } if role == nil { - return logical.ErrorResponse(fmt.Sprintf("Role '%q' not found", name)), nil + return logical.ErrorResponse(fmt.Sprintf("Role %q not found", name)), nil } // Determine if we have a lease configuration From 0d8f812dc8d7532f024437514a4575330b2692f8 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 16:23:03 +0000 Subject: [PATCH 44/52] Checking if client is not nil before deleting token Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/secret_token.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtin/logical/nomad/secret_token.go 
b/builtin/logical/nomad/secret_token.go index 6e892659e..70f261b7b 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -46,6 +46,10 @@ func (b *backend) secretTokenRevoke( return nil, err } + if c == nil { + return nil, fmt.Errorf("Error connecting with Nomad") + } + accessorIDRaw, ok := req.Secret.InternalData["accessor_id"] if !ok { return nil, fmt.Errorf("accessor_id is missing on the lease") From b3799697a28c246d1744c2ebbeae5b82d5193e8b Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 16:31:17 +0000 Subject: [PATCH 45/52] Rename policy into policies --- builtin/logical/nomad/backend_test.go | 4 ++-- builtin/logical/nomad/path_creds_create.go | 2 +- builtin/logical/nomad/path_roles.go | 24 +++++++++---------- builtin/logical/nomad/secret_token.go | 6 ++--- website/source/api/secret/nomad/index.html.md | 8 +++---- .../source/docs/secrets/nomad/index.html.md | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index a7e0b632f..60c863937 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -179,8 +179,8 @@ func TestBackend_renew_revoke(t *testing.T) { req.Path = "role/test" req.Data = map[string]interface{}{ - "policy": []string{"policy"}, - "lease": "6h", + "policies": []string{"policy"}, + "lease": "6h", } resp, err = b.HandleRequest(req) if err != nil { diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index bfdfdf35c..eef77d131 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -59,7 +59,7 @@ func (b *backend) pathTokenRead( token, _, err := c.ACLTokens().Create(&api.ACLToken{ Name: tokenName, Type: role.TokenType, - Policies: role.Policy, + Policies: role.Policies, Global: role.Global, }, nil) if err != nil { diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index ef2efd8e1..aa67c6888 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -26,7 +26,7 @@ func pathRoles(b *backend) *framework.Path { Description: "Name of the role", }, - "policy": &framework.FieldSchema{ + "policies": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: "Comma separated list of policies as previously created in Nomad. Required", }, @@ -42,7 +42,7 @@ func pathRoles(b *backend) *framework.Path { Default: "client", Description: `Which type of token to create: 'client' or 'management'. If a 'management' token, -the "policy" parameter is not required. +the "policies" parameter is not required. 
Defaults to 'client'.`, }, }, @@ -96,9 +96,9 @@ func (b *backend) pathRolesRead( // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ - "type": role.TokenType, - "global": role.Global, - "policy": role.Policy, + "type": role.TokenType, + "global": role.Global, + "policies": role.Policies, }, } return resp, nil @@ -109,18 +109,18 @@ func (b *backend) pathRolesWrite( tokenType := d.Get("type").(string) name := d.Get("name").(string) global := d.Get("global").(bool) - policy := d.Get("policy").([]string) + policies := d.Get("policies").([]string) switch tokenType { case "client": - if len(policy) == 0 { + if len(policies) == 0 { return logical.ErrorResponse( - "policy cannot be empty when using client tokens"), nil + "policies cannot be empty when using client tokens"), nil } case "management": - if len(policy) != 0 { + if len(policies) != 0 { return logical.ErrorResponse( - "policy should be empty when using management tokens"), nil + "policies should be empty when using management tokens"), nil } default: return logical.ErrorResponse( @@ -128,7 +128,7 @@ func (b *backend) pathRolesWrite( } entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ - Policy: policy, + Policies: policies, TokenType: tokenType, Global: global, }) @@ -153,7 +153,7 @@ func (b *backend) pathRolesDelete( } type roleConfig struct { - Policy []string `json:"policy"` + Policies []string `json:"policies"` TokenType string `json:"type"` Global bool `json:"global"` } diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 70f261b7b..2ba4d195c 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -46,9 +46,9 @@ func (b *backend) secretTokenRevoke( return nil, err } - if c == nil { - return nil, fmt.Errorf("Error connecting with Nomad") - } + if c == nil { + return nil, fmt.Errorf("Error connecting with Nomad") + } accessorIDRaw, ok := req.Secret.InternalData["accessor_id"] if !ok { diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index a79db8389..75020569e 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -103,9 +103,9 @@ updated attributes. as a string duration with a time suffix like `"30s"` or `"1h"`. If not provided, the default Vault lease is used. -- `policy` `(string: "")` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. +- `policies` `(string: "")` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. -- `global` `(bool: "")` – Specifies if the token should be global, as defined in the [Nomad Documentation](https://www.nomadproject.io/guides/acl.html#acl-tokens). +- `global` `(bool: "false")` – Specifies if the token should be global, as defined in the [Nomad Documentation](https://www.nomadproject.io/guides/acl.html#acl-tokens). 
ma - `type` `(string: "client")` - Specifies the type of token to create when @@ -117,7 +117,7 @@ To create a client token with a custom policy: ```json { - "policy": "readonly" + "policies": "readonly" } ``` @@ -160,7 +160,7 @@ $ curl \ "auth": null, "data": { "lease": "0s", - "policy": [ + "policies": [ "example" ], "token_type": "client" diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index aa987e779..167cc31db 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -76,7 +76,7 @@ to a set of policy names used to generate those credentials. For example, lets c an "monitoring" role that maps to a "readonly" policy: ``` -$ vault write nomad/role/monitoring policy=readonly +$ vault write nomad/role/monitoring policies=readonly Success! Data written to: nomad/role/monitoring ``` From 7b14f41872c44a32ee49efcf7712c40a4f0a1615 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 16:53:42 +0000 Subject: [PATCH 46/52] Fix docs up to current standards Signed-off-by: Nicolas Corrarello --- website/source/api/secret/nomad/index.html.md | 82 ++++++++++++++----- 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 75020569e..b4b91b585 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -52,13 +52,39 @@ $ curl \ https://vault.rocks/v1/nomad/config/access ``` +## Read Access Configuration + +This endpoint queries for information about the Nomad connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/nomad/config/access` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/nomad/config/access +``` + +### Sample Response + +```json +[...] + "data": { + "address": "http://localhost:4646/" + } +[...] +``` + ## Configure Lease This endpoint configures the lease settings for generated tokens. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `POST` | `/nomad/config/lease` | `204 (empty body)` | +| `POST` | `/nomad/config/lease` | `204 (empty body)` | ### Parameters @@ -85,6 +111,33 @@ $ curl \ https://vault.rocks/v1/nomad/config/lease ``` +## Read Lease Configuration + +This endpoint queries for information about the Lease TTL for the specified mount. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/nomad/config/lease` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/nomad/config/lease +``` + +### Sample Response + +```json +[...] + "data": { + "max_ttl": 86400, + "ttl": 86400 + }, +[...] +``` + ## Create/Update Role This endpoint creates or updates the Nomad role definition in Vault. If the role does not exist, it will be created. If the role already exists, it will receive @@ -157,7 +210,7 @@ $ curl \ ```json { - "auth": null, +[...] "data": { "lease": "0s", "policies": [ @@ -165,12 +218,7 @@ $ curl \ ], "token_type": "client" }, - "lease_duration": 0, - "lease_id": "", - "renewable": false, - "request_id": "f4c7ee18-72aa-3b20-a910-93b6274a9dc0", - "warnings": null, - "wrap_info": null +[...] } ``` @@ -196,18 +244,13 @@ $ curl \ ```json { - "auth": null, +[...] 
"data": { "keys": [ "example" ] }, - "lease_duration": 0, - "lease_id": "", - "renewable": false, - "request_id": "d7bb167b-81c5-9606-c214-b34fcda45634", - "warnings": null, - "wrap_info": null +[...] } ``` @@ -260,16 +303,11 @@ $ curl \ ```json { - "auth": null, +[...] "data": { "accessor_id": "c834ba40-8d84-b0c1-c084-3a31d3383c03", "secret_id": "65af6f07-7f57-bb24-cdae-a27f86a894ce" }, - "lease_duration": 2764800, - "lease_id": "nomad/creds/example/c2686da3-2431-b6d6-7bbf-c5b9496dd6d7", - "renewable": true, - "request_id": "37a06ca1-8a1d-7f17-bda8-4661289c392b", - "warnings": null, - "wrap_info": null +[...] } ``` From b5fd1ce9539e8c5d20c334df45c597ae348be166 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 29 Nov 2017 21:53:21 +0000 Subject: [PATCH 47/52] Adding SealWrap configuration, protecting the config/access path Signed-off-by: Nicolas Corrarello --- builtin/logical/nomad/backend.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index cbfd8be49..4961970e2 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -17,6 +17,12 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) { func Backend() *backend { var b backend b.Backend = &framework.Backend{ + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/access", + }, + }, + Paths: []*framework.Path{ pathConfigAccess(&b), pathConfigLease(&b), From c71f596fbdac5152eae5d659b45c72882e134da8 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 15 Dec 2017 17:06:56 -0500 Subject: [PATCH 48/52] address some feedback --- builtin/logical/nomad/path_config_access.go | 5 ++-- builtin/logical/nomad/path_config_lease.go | 17 +++++++++-- builtin/logical/nomad/path_creds_create.go | 7 +++-- builtin/logical/nomad/path_roles.go | 11 ++++--- builtin/logical/nomad/secret_token.go | 10 +++++-- website/source/api/secret/nomad/index.html.md | 29 ++++++++++++++++--- 6 files changed, 57 insertions(+), 22 deletions(-) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 8321e62ca..c86749035 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -1,8 +1,7 @@ package nomad import ( - "fmt" - + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -40,7 +39,7 @@ func (b *backend) readConfigAccess(storage logical.Storage) (*accessConfig, erro conf := &accessConfig{} if err := entry.DecodeJSON(conf); err != nil { - return nil, fmt.Errorf("error reading nomad access configuration: %s", err) + return nil, errwrap.Wrapf("error reading nomad access configuration: {{err}}", err) } return conf, nil diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index c13a88f4e..521252bc9 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -7,6 +7,8 @@ import ( "github.com/hashicorp/vault/logical/framework" ) +const leaseConfigKey = "config/lease" + func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", @@ -24,6 +26,7 @@ func pathConfigLease(b *backend) *framework.Path { Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathLeaseRead, logical.UpdateOperation: b.pathLeaseUpdate, + logical.DeleteOperation: b.pathLeaseDelete, }, HelpSynopsis: pathConfigLeaseHelpSyn, @@ -47,6 
+50,14 @@ func (b *backend) pathLeaseUpdate(req *logical.Request, d *framework.FieldData) return nil, nil } +func (b *backend) pathLeaseDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := req.Storage.Delete(leaseConfigKey); err != nil { + return nil, err + } + + return nil, nil +} + // Returns the lease configuration parameters func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { lease, err := b.LeaseConfig(req.Storage) @@ -59,15 +70,15 @@ func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) return &logical.Response{ Data: map[string]interface{}{ - "ttl": lease.TTL.Seconds(), - "max_ttl": lease.MaxTTL.Seconds(), + "ttl": int64(lease.TTL.Seconds()), + "max_ttl": int64(lease.MaxTTL.Seconds()), }, }, nil } // Lease returns the lease information func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) { - entry, err := s.Get("config/lease") + entry, err := s.Get(leaseConfigKey) if err != nil { return nil, err } diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index eef77d131..7d25ac6b0 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -31,10 +32,10 @@ func (b *backend) pathTokenRead( role, err := b.Role(req.Storage, name) if err != nil { - return nil, fmt.Errorf("error retrieving role: %v", err) + return nil, errwrap.Wrapf("error retrieving role: {{err}}", err) } if role == nil { - return logical.ErrorResponse(fmt.Sprintf("Role %q not found", name)), nil + return logical.ErrorResponse(fmt.Sprintf("role %q not found", name)), nil } // Determine if we have a lease configuration @@ -53,7 +54,7 @@ func (b *backend) pathTokenRead( } // Generate a name for the token - tokenName := fmt.Sprintf("Vault-%s-%s-%d", name, req.DisplayName, time.Now().UnixNano()) + tokenName := fmt.Sprintf("vault-%s-%s-%d", name, req.DisplayName, time.Now().UnixNano()) // Create it token, _, err := c.ACLTokens().Create(&api.ACLToken{ diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index aa67c6888..be16a2ac9 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,8 +1,7 @@ package nomad import ( - "fmt" - + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -28,13 +27,13 @@ func pathRoles(b *backend) *framework.Path { "policies": &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, - Description: "Comma separated list of policies as previously created in Nomad. Required", + Description: "Comma-separated string or list of policies as previously created in Nomad. Required for 'client' token.", }, "global": &framework.FieldSchema{ Type: framework.TypeBool, Default: false, - Description: "Boolean value describing if the token should be global or not. Defaults to false", + Description: "Boolean value describing if the token should be global or not. 
Defaults to false.", }, "type": &framework.FieldSchema{ @@ -58,7 +57,7 @@ Defaults to 'client'.`, func (b *backend) Role(storage logical.Storage, name string) (*roleConfig, error) { entry, err := storage.Get("role/" + name) if err != nil { - return nil, fmt.Errorf("error retrieving role: %s", err) + return nil, errwrap.Wrapf("error retrieving role: {{err}}", err) } if entry == nil { return nil, nil @@ -124,7 +123,7 @@ func (b *backend) pathRolesWrite( } default: return logical.ErrorResponse( - "type must be \"client\" or \"management\""), nil + `type must be "client" or "management"`), nil } entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 2ba4d195c..78ca0bfb9 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -1,6 +1,7 @@ package nomad import ( + "errors" "fmt" "github.com/hashicorp/vault/logical" @@ -47,15 +48,18 @@ func (b *backend) secretTokenRevoke( } if c == nil { - return nil, fmt.Errorf("Error connecting with Nomad") + return nil, fmt.Errorf("error getting Nomad client") } accessorIDRaw, ok := req.Secret.InternalData["accessor_id"] if !ok { return nil, fmt.Errorf("accessor_id is missing on the lease") } - - _, err = c.ACLTokens().Delete(accessorIDRaw.(string), nil) + accessorID, ok := accessorIDRaw.(string) + if !ok { + return nil, errors.New("unable to convert accessor_id") + } + _, err = c.ACLTokens().Delete(accessorID, nil) if err != nil { return nil, err } diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index b4b91b585..9e5e2f9f3 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -88,9 +88,13 @@ This endpoint configures the lease settings for generated tokens. ### Parameters -- `ttl` `(int: 0)` – Specifies the lease ttl provided in seconds. +- `ttl` `(string: "")` – Specifies the ttl for the lease. This is provided + as a string duration with a time suffix like `"30s"` or `"1h"` or as total + seconds. -- `max_ttl` `(int: 0)` – Specifies the maximum ttl provided in seconds. +- `max_ttl` `(string: "")` – Specifies the max ttl for the lease. This is + provided as a string duration with a time suffix like `"30s"` or `"1h"` or as + total seconds. ### Sample Payload @@ -138,6 +142,23 @@ $ curl \ [...] ``` +## Delete Lease Configuration + +This endpoint deletes the lease configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/nomad/config/lease` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/nomad/config/lease +``` + ## Create/Update Role This endpoint creates or updates the Nomad role definition in Vault. If the role does not exist, it will be created. If the role already exists, it will receive @@ -153,8 +174,8 @@ updated attributes. which to create this Nomad tokens. This is part of the request URL. - `lease` `(string: "")` – Specifies the lease for this role. This is provided - as a string duration with a time suffix like `"30s"` or `"1h"`. If not - provided, the default Vault lease is used. + as a string duration with a time suffix like `"30s"` or `"1h"` or as total + seconds. If not provided, the default Vault lease is used. - `policies` `(string: "")` – Comma separated list of Nomad policies the token is going to be created against. 
These need to be created beforehand in Nomad. From b904d28d8238893373c4c87036b4ed9bb0cd0747 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 15 Dec 2017 19:18:32 -0500 Subject: [PATCH 49/52] adding access config existence check and delete endpoint --- builtin/logical/nomad/path_config_access.go | 62 ++++++++++++++++----- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index c86749035..9ad4a3ae2 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -6,6 +6,8 @@ import ( "github.com/hashicorp/vault/logical/framework" ) +const configAccessKey = "config/access" + func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", @@ -23,13 +25,26 @@ func pathConfigAccess(b *backend) *framework.Path { Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathConfigAccessRead, + logical.CreateOperation: b.pathConfigAccessWrite, logical.UpdateOperation: b.pathConfigAccessWrite, + logical.DeleteOperation: b.pathConfigAccessDelete, }, + + ExistenceCheck: b.configExistenceCheck, } } +func (b *backend) configExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) { + entry, err := b.readConfigAccess(req.Storage) + if err != nil { + return false, err + } + + return entry != nil, nil +} + func (b *backend) readConfigAccess(storage logical.Storage) (*accessConfig, error) { - entry, err := storage.Get("config/access") + entry, err := storage.Get(configAccessKey) if err != nil { return nil, err } @@ -64,22 +79,35 @@ func (b *backend) pathConfigAccessRead( func (b *backend) pathConfigAccessWrite( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - address := data.Get("address").(string) - if address == "" { - return logical.ErrorResponse("missing nomad server address"), nil - } - token := data.Get("token").(string) - if token == "" { - return logical.ErrorResponse("missing nomad management token"), nil - } - entry, err := logical.StorageEntryJSON("config/access", accessConfig{ - Address: address, - Token: token, - }) + conf, err := b.readConfigAccess(req.Storage) if err != nil { return nil, err } + if conf == nil { + conf = &accessConfig{} + } + address, ok := data.GetOk("address") + if ok { + conf.Address = address.(string) + } else { + if req.Operation == logical.CreateOperation { + return logical.ErrorResponse("missing nomad server address"), nil + } + } + token, ok := data.GetOk("token") + if ok { + conf.Token = token.(string) + } else { + if req.Operation == logical.CreateOperation { + return logical.ErrorResponse("missing nomad management token"), nil + } + } + + entry, err := logical.StorageEntryJSON("config/access", conf) + if err != nil { + return nil, err + } if err := req.Storage.Put(entry); err != nil { return nil, err } @@ -87,6 +115,14 @@ func (b *backend) pathConfigAccessWrite( return nil, nil } +func (b *backend) pathConfigAccessDelete( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if err := req.Storage.Delete(configAccessKey); err != nil { + return nil, err + } + return nil, nil +} + type accessConfig struct { Address string `json:"address"` Token string `json:"token"` From b08606b320e6b12ca5b276417d518fef760e0fe4 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 15 Dec 2017 19:50:20 -0500 Subject: [PATCH 50/52] adding existence check for roles --- 
builtin/logical/nomad/backend_test.go | 4 +- builtin/logical/nomad/path_roles.go | 55 +++++++++++++++++++++------ 2 files changed, 45 insertions(+), 14 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 60c863937..1c56db630 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -63,7 +63,7 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma t.Fatalf("err: %v", err) } nomadToken = aclbootstrap.SecretID - t.Log("[WARN] Generated Master token: %s", nomadToken) + t.Logf("[WARN] Generated Master token: %s", nomadToken) policy := &nomadapi.ACLPolicy{ Name: "test", Description: "test", @@ -211,7 +211,7 @@ func TestBackend_renew_revoke(t *testing.T) { if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) } - t.Log("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) + t.Logf("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) // Build a client and verify that the credentials work nomadapiConfig := nomadapi.DefaultConfig() diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index be16a2ac9..a75a5215f 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,6 +1,8 @@ package nomad import ( + "errors" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -32,7 +34,6 @@ func pathRoles(b *backend) *framework.Path { "global": &framework.FieldSchema{ Type: framework.TypeBool, - Default: false, Description: "Boolean value describing if the token should be global or not. Defaults to false.", }, @@ -48,13 +49,31 @@ Defaults to 'client'.`, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathRolesRead, + logical.CreateOperation: b.pathRolesWrite, logical.UpdateOperation: b.pathRolesWrite, logical.DeleteOperation: b.pathRolesDelete, }, + + ExistenceCheck: b.rolesExistenceCheck, } } +// Establishes dichotomy of request operation between CreateOperation and UpdateOperation. +// Returning 'true' forces an UpdateOperation, CreateOperation otherwise. 
+func (b *backend) rolesExistenceCheck(req *logical.Request, d *framework.FieldData) (bool, error) { + name := d.Get("name").(string) + entry, err := b.Role(req.Storage, name) + if err != nil { + return false, err + } + return entry != nil, nil +} + func (b *backend) Role(storage logical.Storage, name string) (*roleConfig, error) { + if name == "" { + return nil, errors.New("invalid role name") + } + entry, err := storage.Get("role/" + name) if err != nil { return nil, errwrap.Wrapf("error retrieving role: {{err}}", err) @@ -105,19 +124,30 @@ func (b *backend) pathRolesRead( func (b *backend) pathRolesWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - tokenType := d.Get("type").(string) name := d.Get("name").(string) - global := d.Get("global").(bool) - policies := d.Get("policies").([]string) - switch tokenType { + role, err := b.Role(req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + role = new(roleConfig) + } + + policies, ok := d.GetOk("policies") + if ok { + role.Policies = policies.([]string) + } + + role.TokenType = d.Get("type").(string) + switch role.TokenType { case "client": - if len(policies) == 0 { + if len(role.Policies) == 0 { return logical.ErrorResponse( "policies cannot be empty when using client tokens"), nil } case "management": - if len(policies) != 0 { + if len(role.Policies) != 0 { return logical.ErrorResponse( "policies should be empty when using management tokens"), nil } @@ -126,11 +156,12 @@ func (b *backend) pathRolesWrite( `type must be "client" or "management"`), nil } - entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ - Policies: policies, - TokenType: tokenType, - Global: global, - }) + global, ok := d.GetOk("global") + if ok { + role.Global = global.(bool) + } + + entry, err := logical.StorageEntryJSON("role/"+name, role) if err != nil { return nil, err } From f6bed8b925bdd3bc8f9f08b604a8481b5b5ea1f5 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Sun, 17 Dec 2017 09:10:56 -0500 Subject: [PATCH 51/52] fixing up config to allow environment vars supported by api client --- builtin/logical/nomad/backend.go | 14 ++-- builtin/logical/nomad/path_config_access.go | 8 --- website/source/api/secret/nomad/index.html.md | 64 +++++++++---------- 3 files changed, 37 insertions(+), 49 deletions(-) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 4961970e2..630218818 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -50,14 +50,16 @@ func (b *backend) client(s logical.Storage) (*api.Client, error) { return nil, err } - if conf == nil { - return nil, err + nomadConf := new(api.Config) + if conf != nil { + if conf.Address != "" { + nomadConf.Address = conf.Address + } + if conf.Token != "" { + nomadConf.SecretID = conf.Token + } } - nomadConf := api.DefaultConfig() - nomadConf.Address = conf.Address - nomadConf.SecretID = conf.Token - client, err := api.NewClient(nomadConf) if err != nil { return nil, err diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 9ad4a3ae2..7a62445df 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -90,18 +90,10 @@ func (b *backend) pathConfigAccessWrite( address, ok := data.GetOk("address") if ok { conf.Address = address.(string) - } else { - if req.Operation == logical.CreateOperation { - return logical.ErrorResponse("missing nomad server address"), nil - } } token, ok := 
data.GetOk("token") if ok { conf.Token = token.(string) - } else { - if req.Operation == logical.CreateOperation { - return logical.ErrorResponse("missing nomad management token"), nil - } } entry, err := logical.StorageEntryJSON("config/access", conf) diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 9e5e2f9f3..baa07cbd9 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -28,10 +28,14 @@ Nomad tokens. ### Parameters -- `address` `(string: )` – Specifies the address of the Nomad +- `address` `(string: "")` – Specifies the address of the Nomad instance, provided as `"protocol://host:port"` like `"http://127.0.0.1:4646"`. + This value can also be provided on individual calls with the NOMAD_ADDR + environment variable. -- `token` `(string: )` – Specifies the Nomad Management token to use. +- `token` `(string: "")` – Specifies the Nomad Management token to use. + This value can also be provided on individual calls with the NOMAD_TOKEN + environment variable. ### Sample Payload @@ -71,11 +75,9 @@ $ curl \ ### Sample Response ```json -[...] - "data": { - "address": "http://localhost:4646/" - } -[...] + "data": { + "address": "http://localhost:4646/" + } ``` ## Configure Lease @@ -134,12 +136,10 @@ $ curl \ ### Sample Response ```json -[...] - "data": { - "max_ttl": 86400, - "ttl": 86400 - }, -[...] + "data": { + "max_ttl": 86400, + "ttl": 86400 + } ``` ## Delete Lease Configuration @@ -231,15 +231,13 @@ $ curl \ ```json { -[...] - "data": { - "lease": "0s", - "policies": [ - "example" - ], - "token_type": "client" - }, -[...] + "data": { + "lease": "0s", + "policies": [ + "example" + ], + "token_type": "client" + } } ``` @@ -265,13 +263,11 @@ $ curl \ ```json { -[...] - "data": { - "keys": [ - "example" - ] - }, -[...] + "data": { + "keys": [ + "example" + ] + } } ``` @@ -324,11 +320,9 @@ $ curl \ ```json { -[...] - "data": { - "accessor_id": "c834ba40-8d84-b0c1-c084-3a31d3383c03", - "secret_id": "65af6f07-7f57-bb24-cdae-a27f86a894ce" - }, -[...] 
+ "data": { + "accessor_id": "c834ba40-8d84-b0c1-c084-3a31d3383c03", + "secret_id": "65af6f07-7f57-bb24-cdae-a27f86a894ce" + } } ``` From 400d7384039cee7bd48bae798d619f08f334ddb9 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Sun, 17 Dec 2017 10:51:39 -0500 Subject: [PATCH 52/52] use defaultconfig as base, adding env var test --- builtin/logical/nomad/backend.go | 2 +- builtin/logical/nomad/backend_test.go | 40 +++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 630218818..82618a22e 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -50,7 +50,7 @@ func (b *backend) client(s logical.Storage) (*api.Client, error) { return nil, err } - nomadConf := new(api.Config) + nomadConf := api.DefaultConfig() if conf != nil { if conf.Address != "" { nomadConf.Address = conf.Address diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 1c56db630..bbff49c15 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -260,3 +260,43 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal("err: expected error") } } + +func TestBackend_CredsCreateEnvVar(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() + + req := logical.TestRequest(t, logical.UpdateOperation, "role/test") + req.Data = map[string]interface{}{ + "policies": []string{"policy"}, + "lease": "6h", + } + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + os.Setenv("NOMAD_TOKEN", connToken) + defer os.Unsetenv("NOMAD_TOKEN") + os.Setenv("NOMAD_ADDR", connURL) + defer os.Unsetenv("NOMAD_ADDR") + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } +}