Merge branch 'master' of github.com:hashicorp/nomad

This commit is contained in:
Alex Dadgar 2016-11-01 13:24:07 -07:00
commit ebe4b7893b
16 changed files with 595 additions and 39 deletions

View File

@ -167,7 +167,7 @@ func GetTaskEnv(allocDir *allocdir.AllocDir, node *structs.Node,
}
env.SetTaskLocalDir(filepath.Join(taskdir, allocdir.TaskLocal))
- env.SetSecretDir(filepath.Join(taskdir, allocdir.TaskSecrets))
+ env.SetSecretsDir(filepath.Join(taskdir, allocdir.TaskSecrets))
}
if task.Resources != nil {

View File

@ -21,9 +21,9 @@ const (
// removed.
TaskLocalDir = "NOMAD_TASK_DIR"
- // SecretDir is the environment variable with the path to the tasks secret
+ // SecretsDir is the environment variable with the path to the tasks secret
// directory where it can store sensitive data.
- SecretDir = "NOMAD_SECRET_DIR"
+ SecretsDir = "NOMAD_SECRETS_DIR"
// MemLimit is the environment variable with the tasks memory limit in MBs.
MemLimit = "NOMAD_MEMORY_LIMIT"
@ -89,7 +89,7 @@ type TaskEnvironment struct {
JobMeta map[string]string
AllocDir string
TaskDir string
- SecretDir string
+ SecretsDir string
CpuLimit int
MemLimit int
TaskName string
@ -167,8 +167,8 @@ func (t *TaskEnvironment) Build() *TaskEnvironment {
if t.TaskDir != "" {
t.TaskEnv[TaskLocalDir] = t.TaskDir
}
- if t.SecretDir != "" {
- t.TaskEnv[SecretDir] = t.SecretDir
+ if t.SecretsDir != "" {
+ t.TaskEnv[SecretsDir] = t.SecretsDir
}
// Build the resource limits
@ -274,13 +274,13 @@ func (t *TaskEnvironment) ClearTaskLocalDir() *TaskEnvironment {
return t
}
- func (t *TaskEnvironment) SetSecretDir(dir string) *TaskEnvironment {
- t.SecretDir = dir
+ func (t *TaskEnvironment) SetSecretsDir(dir string) *TaskEnvironment {
+ t.SecretsDir = dir
return t
}
- func (t *TaskEnvironment) ClearSecretDir() *TaskEnvironment {
- t.SecretDir = ""
+ func (t *TaskEnvironment) ClearSecretsDir() *TaskEnvironment {
+ t.SecretsDir = ""
return t
}

View File

@ -155,7 +155,7 @@ func (d *LxcDriver) Abilities() DriverAbilities {
// Fingerprint fingerprints the lxc driver configuration
func (d *LxcDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
- enabled := cfg.ReadBoolDefault(lxcConfigOption, false)
+ enabled := cfg.ReadBoolDefault(lxcConfigOption, true)
if !enabled && !cfg.DevMode {
return false, nil
}

View File

@ -35,16 +35,16 @@ func TestLxcDriver_Fingerprint(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
- if apply {
- t.Fatalf("should not apply by default")
+ if !apply {
+ t.Fatalf("should apply by default")
}
- apply, err = d.Fingerprint(&config.Config{Options: map[string]string{lxcConfigOption: "1"}}, node)
+ apply, err = d.Fingerprint(&config.Config{Options: map[string]string{lxcConfigOption: "0"}}, node)
if err != nil {
t.Fatalf("err: %v", err)
}
- if !apply {
- t.Fatalf("should apply with config")
+ if apply {
+ t.Fatalf("should not apply with config")
}
if node.Attributes["driver.lxc"] == "" {
t.Fatalf("missing driver")

View File

@ -272,6 +272,13 @@ func (c *Command) readConfig() *Config {
c.Ui.Error("WARNING: Bootstrap mode enabled! Potentially unsafe operation.")
}
// Check to see if we should read the Vault token from the environment
if config.Vault.Token == "" {
if token, ok := os.LookupEnv("VAULT_TOKEN"); ok {
config.Vault.Token = token
}
}
return config
}
@ -918,7 +925,8 @@ Vault Options:
-vault-token=<token>
The Vault token used to derive tokens from Vault on behalf of clients.
- This only needs to be set on Servers.
+ This only needs to be set on Servers. Overrides the Vault token read from
+ the VAULT_TOKEN environment variable.
-vault-allow-unauthenticated
Whether to allow jobs to be submitted that request Vault Tokens but do not

View File

@ -22,12 +22,20 @@ import (
const (
// authPolicy is a policy that allows token creation operations
- authPolicy = `path "auth/token/create/*" {
- capabilities = ["create", "read", "update", "delete", "list"]
+ authPolicy = `path "auth/token/create/test" {
+ capabilities = ["create", "update"]
}
- path "auth/token/roles/*" {
- capabilities = ["create", "read", "update", "delete", "list"]
+ path "auth/token/lookup/*" {
+ capabilities = ["read"]
}
+ path "auth/token/roles/test" {
+ capabilities = ["read"]
+ }
+ path "/auth/token/revoke-accessor/*" {
+ capabilities = ["update"]
+ }
`
)
@ -199,7 +207,7 @@ func TestVaultClient_SetConfig(t *testing.T) {
// created in that role
func defaultTestVaultRoleAndToken(v *testutil.TestVault, t *testing.T, rolePeriod int) string {
d := make(map[string]interface{}, 2)
d["allowed_policies"] = "default,auth"
d["allowed_policies"] = "auth"
d["period"] = rolePeriod
return testVaultRoleAndToken(v, t, d)
}
@ -312,7 +320,7 @@ func TestVaultClient_LookupToken_Invalid(t *testing.T) {
}
}
- func TestVaultClient_LookupToken(t *testing.T) {
+ func TestVaultClient_LookupToken_Root(t *testing.T) {
v := testutil.NewTestVault(t).Start()
defer v.Stop()
@ -373,6 +381,70 @@ func TestVaultClient_LookupToken(t *testing.T) {
}
}
func TestVaultClient_LookupToken_Role(t *testing.T) {
v := testutil.NewTestVault(t).Start()
defer v.Stop()
// Set the configs token in a new test role
v.Config.Token = defaultTestVaultRoleAndToken(v, t, 5)
logger := log.New(os.Stderr, "", log.LstdFlags)
client, err := NewVaultClient(v.Config, logger, nil)
if err != nil {
t.Fatalf("failed to build vault client: %v", err)
}
client.SetActive(true)
defer client.Stop()
waitForConnection(client, t)
// Lookup ourselves
s, err := client.LookupToken(context.Background(), v.Config.Token)
if err != nil {
t.Fatalf("self lookup failed: %v", err)
}
policies, err := PoliciesFrom(s)
if err != nil {
t.Fatalf("failed to parse policies: %v", err)
}
expected := []string{"auth", "default"}
if !reflect.DeepEqual(policies, expected) {
t.Fatalf("Unexpected policies; got %v; want %v", policies, expected)
}
// Create a token with a different set of policies
expected = []string{"default"}
req := vapi.TokenCreateRequest{
Policies: expected,
}
s, err = v.Client.Auth().Token().Create(&req)
if err != nil {
t.Fatalf("failed to create child token: %v", err)
}
// Get the client token
if s == nil || s.Auth == nil {
t.Fatalf("bad secret response: %+v", s)
}
// Lookup new child
s, err = client.LookupToken(context.Background(), s.Auth.ClientToken)
if err != nil {
t.Fatalf("self lookup failed: %v", err)
}
policies, err = PoliciesFrom(s)
if err != nil {
t.Fatalf("failed to parse policies: %v", err)
}
if !reflect.DeepEqual(policies, expected) {
t.Fatalf("Unexpected policies; got %v; want %v", policies, expected)
}
}
func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
v := testutil.NewTestVault(t).Start()
defer v.Stop()
@ -621,7 +693,7 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) {
}
}
- func TestVaultClient_RevokeTokens(t *testing.T) {
+ func TestVaultClient_RevokeTokens_Root(t *testing.T) {
v := testutil.NewTestVault(t).Start()
defer v.Stop()
@ -685,6 +757,73 @@ func TestVaultClient_RevokeTokens(t *testing.T) {
}
}
func TestVaultClient_RevokeTokens_Role(t *testing.T) {
v := testutil.NewTestVault(t).Start()
defer v.Stop()
// Set the configs token in a new test role
v.Config.Token = defaultTestVaultRoleAndToken(v, t, 5)
purged := 0
purge := func(accessors []*structs.VaultAccessor) error {
purged += len(accessors)
return nil
}
logger := log.New(os.Stderr, "", log.LstdFlags)
client, err := NewVaultClient(v.Config, logger, purge)
if err != nil {
t.Fatalf("failed to build vault client: %v", err)
}
client.SetActive(true)
defer client.Stop()
waitForConnection(client, t)
// Create some vault tokens
auth := v.Client.Auth().Token()
req := vapi.TokenCreateRequest{
Policies: []string{"default"},
}
t1, err := auth.Create(&req)
if err != nil {
t.Fatalf("Failed to create vault token: %v", err)
}
if t1 == nil || t1.Auth == nil {
t.Fatalf("bad secret response: %+v", t1)
}
t2, err := auth.Create(&req)
if err != nil {
t.Fatalf("Failed to create vault token: %v", err)
}
if t2 == nil || t2.Auth == nil {
t.Fatalf("bad secret response: %+v", t2)
}
// Create two VaultAccessors
vas := []*structs.VaultAccessor{
&structs.VaultAccessor{Accessor: t1.Auth.Accessor},
&structs.VaultAccessor{Accessor: t2.Auth.Accessor},
}
// Issue a token revocation
if err := client.RevokeTokens(context.Background(), vas, true); err != nil {
t.Fatalf("RevokeTokens failed: %v", err)
}
// Lookup the token and make sure we get an error
if s, err := auth.Lookup(t1.Auth.ClientToken); err == nil {
t.Fatalf("Revoked token lookup didn't fail: %+v", s)
}
if s, err := auth.Lookup(t2.Auth.ClientToken); err == nil {
t.Fatalf("Revoked token lookup didn't fail: %+v", s)
}
if purged != 2 {
t.Fatalf("Expected purged 2; got %d", purged)
}
}
func waitForConnection(v *vaultClient, t *testing.T) {
testutil.WaitForResult(func() (bool, error) {
return v.ConnectionEstablished()

View File

@ -0,0 +1,20 @@
# Allow creating tokens under the role
path "auth/token/create/nomad-server" {
capabilities = ["create", "update"]
}
# Allow looking up the role
path "auth/token/roles/nomad-server" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to
# access the tokens they are requesting
path "auth/token/lookup/*" {
capabilities = ["read"]
}
# Allow revoking tokens that should no longer exist
path "/auth/token/revoke-accessor/*" {
capabilities = ["update"]
}

View File

@ -0,0 +1,8 @@
{
"allowed_policies": "nomad-server",
"explicit_max_ttl": 0,
"name": "nomad-server",
"orphan": false,
"period": 259200,
"renewable": true
}

View File

@ -318,6 +318,49 @@ When `server_auto_join`, `client_auto_join` and `auto_advertise` are all
enabled, as they are by default, and Consul is available, the Nomad cluster will
self-bootstrap.
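For illustration, a `consul` block that leaves self-bootstrap enabled might look
like the following sketch. The key names are the options referenced above; the
address value is an assumption about a local Consul agent.

```hcl
consul {
  # Local Consul agent (assumed default address)
  address = "127.0.0.1:8500"

  # These default to true and are shown only for clarity
  auto_advertise   = true
  server_auto_join = true
  client_auto_join = true
}
```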
## Vault Options
The following options are used to configure [Vault](https://www.vaultproject.io)
integration and are entirely optional.
* `vault`: The top-level config key used to contain all Vault-related
configuration options. The value is a key-value map which supports the
following keys:
<br>
* `address`: The address to the Vault server given in the format of
`protocol://host:port`. Defaults to `https://vault.service.consul:8200`.
* `token`: The token used by Servers to derive child tokens for jobs
requesting tokens. As such it does not need to be specified by the Client.
Visit the [Vault Integration](/docs/vault-integration/index.html)
documentation to see how to generate an appropriate token. The `VAULT_TOKEN`
environment variable is read when starting the agent; if a flag or
configuration file specifies a value, that value takes precedence.
* `allow_unauthenticated`: Allows users to submit jobs requiring Vault tokens
without providing a Vault token proving they have access to these policies.
* `task_token_ttl`: Sets the TTL of created tokens when using a root token.
* `tls_ca_file`: Optional path to the CA certificate used for Vault
communication, defaults to the system bundle if not specified.
* `tls_ca_path`: Optional path to a folder containing CA certificates to be
used for Vault communication, defaults to the system bundle if not
specified.
* `tls_cert_file`: The path to the certificate used for Vault communication. If
this is set then you need to also set `tls_key_file`.
* `tls_key_file`: The path to the private key used for Vault communication. If
this is set then you need to also set `tls_cert_file`.
* `tls_skip_verify`: Enables or disables SSL verification. Defaults to `false`.
* `tls_server_name`: Optional parameter used to set the SNI host when
connecting to Vault via TLS.
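Putting these options together, a minimal `vault` block might look like the
following sketch. All values are illustrative, and the string format for
`task_token_ttl` is an assumption:

```hcl
vault {
  address = "https://vault.service.consul:8200"

  # Server-only token used to derive child tokens (illustrative value)
  token = "f02f01c2-c0d1-7cb7-6b88-8a14fada58c0"

  allow_unauthenticated = false
  task_token_ttl        = "72h"

  # TLS settings for Vault communication (paths are illustrative)
  tls_cert_file   = "/etc/vault/cert.pem"
  tls_key_file    = "/etc/vault/key.pem"
  tls_server_name = "vault.service.consul"
}
```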
## <a id="atlas_options"></a>Atlas Options
**NOTE**: Nomad integration with Atlas is awaiting release of Atlas features
@ -550,7 +593,7 @@ documentation [here](/docs/drivers/index.html)
If specified, fingerprinters not in the whitelist will be disabled. If the
whitelist is empty, all fingerprinters are used.
### <a id="chroot_env_map"></a>Client ChrootEnv Map
### <a id="chroot_env_map"></a>Client Chroot Map
Drivers based on [Isolated Fork/Exec](/docs/drivers/exec.html) implement file
system isolation using chroot on Linux. The `chroot_env` map allows the chroot
@ -618,3 +661,19 @@ via CLI arguments. The `agent` command accepts the following arguments:
option.
* `-state-dir=<path>`: Equivalent to the Client [state_dir](#state_dir) config
option.
* `-vault-enabled`: Whether to enable or disable Vault integration.
* `-vault-address=<addr>`: The address to communicate with Vault.
* `-vault-token=<token>`: The Vault token used to derive tokens. Only needs to
be set on Servers. Overrides the Vault token read from the VAULT_TOKEN
environment variable.
* `-vault-ca-file=<path>`: Path to a PEM-encoded CA cert file used to verify the
Vault server SSL certificate.
* `-vault-ca-path=<path>`: Path to a directory of PEM-encoded CA cert files used
to verify the Vault server SSL certificate.
* `-vault-cert-file=<path>`: The path to the certificate for Vault communication.
* `-vault-key-file=<path>`: The path to the private key for Vault communication.
* `-vault-tls-skip-verify`: A boolean that determines whether to skip SSL
certificate verification.
* `-vault-tls-server-name=<name>`: Used to set the SNI host when connecting to
Vault over TLS.

View File

@ -0,0 +1,100 @@
---
layout: "docs"
page_title: "Drivers: LXC"
sidebar_current: "docs-drivers-lxc"
description: |-
The lxc task driver is used to run application containers using lxc.
---
# LXC Driver
Name: `lxc`
The `lxc` driver provides an interface for using LXC for running application
containers.
!> **Experimental!** Currently, the LXC driver supports launching containers
via templates but only supports host networking. If both an LXC image and the
host it is run on use upstart or systemd, shutdown signals may be passed from
the container to the host.
~> LXC is only enabled in the special `linux_amd64_lxc` build of Nomad because
it links to the `liblxc` system library. Use the `lxc` build tag if compiling
Nomad yourself.
## Task Configuration
```hcl
task "busybox" {
driver = "lxc"
config {
log_level = "trace"
verbosity = "verbose"
template = "/usr/share/lxc/templates/lxc-busybox"
}
}
```
The `lxc` driver supports the following configuration in the job spec:
* `template` - The LXC template to run.
```hcl
config {
template = "/usr/share/lxc/templates/lxc-alpine"
}
```
* `log_level` - (Optional) LXC library's logging level. Defaults to `error`.
Must be one of `trace`, `debug`, `info`, `warn`, or `error`.
```hcl
config {
log_level = "debug"
}
```
* `verbosity` - (Optional) Enables extra verbosity in the LXC library's
logging. Defaults to `quiet`. Must be one of `quiet` or `verbose`.
```hcl
config {
verbosity = "quiet"
}
```
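Putting the options above together, a complete job using the `lxc` driver might
look like the following sketch; the job, group, and resources values are
illustrative:

```hcl
job "busybox" {
  datacenters = ["dc1"]

  group "containers" {
    task "busybox" {
      driver = "lxc"

      config {
        template  = "/usr/share/lxc/templates/lxc-busybox"
        log_level = "info"
        verbosity = "quiet"
      }

      resources {
        cpu    = 500 # MHz
        memory = 256 # MB
      }
    }
  }
}
```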
## Networking
Currently the `lxc` driver only supports host networking. See the `none`
networking type in the [`lxc.container.conf` manual][lxc_man] for more
information.
[lxc_man]: https://linuxcontainers.org/lxc/manpages/man5/lxc.container.conf.5.html#lbAM
## Client Requirements
The `lxc` driver requires the following:
* 64bit Linux host
* The `linux_amd64_lxc` Nomad binary
* `liblxc` to be installed
* `lxc-templates` to be installed
## Client Configuration
* `lxc.enable` - The `lxc` driver may be disabled on hosts by setting this
[client configuration](/docs/agent/config.html#options) option to `false`
(defaults to `true`).
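For example, disabling the driver on a client might look like this sketch,
assuming the standard `client.options` map in the agent configuration:

```hcl
client {
  options = {
    # Turn the lxc driver off on this node
    "lxc.enable" = "false"
  }
}
```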
## Client Attributes
The `lxc` driver will set the following client attributes:
* `driver.lxc` - Set to `1` if LXC is found and enabled on the host node.
* `driver.lxc.version` - Version of `lxc`, e.g. `1.1.0`.
## Resource Isolation
This driver supports CPU and memory isolation via the `lxc` library. Network
isolation is not currently supported.

View File

@ -93,5 +93,5 @@ restart {
`interval` is reached. This is the default behavior.
- `"fail"` - Instructs the scheduler to not attempt to restart the task on
- failure. This mode is useful for non-idempotent jobs which are not safe to
- simply restart.
+ failure. This mode is useful for non-idempotent jobs which are unlikely to
+ succeed after a few failures.
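For context, a restart block using the `fail` mode might look like the
following sketch; the attempts, interval, and delay values are illustrative:

```hcl
restart {
  attempts = 2
  interval = "1m"
  delay    = "15s"
  mode     = "fail"
}
```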

View File

@ -38,6 +38,20 @@ job "docs" {
}
```
The Nomad client will make the Vault token available to the task by writing it
to the secrets directory at `secrets/vault_token` and by injecting a `VAULT_TOKEN`
environment variable.
- If Vault token renewal fails due to a Vault outage, the Nomad client will
- attempt to retrieve a new Vault token. When the new Vault token is retrieved,
- the contents of the file will be replaced and action will be taken based on the
- `change_mode`.
+ If Nomad is unable to renew the Vault token (perhaps due to a Vault outage or
+ network error), the client will retrieve a new Vault token. If successful, the
+ contents of the secrets file are updated on disk, and action will be taken
+ according to the value set in the `change_mode` parameter.
If a `vault` stanza is specified, the [`template`][template] stanza can interact
with Vault as well.
@ -69,8 +83,9 @@ The following examples only show the `vault` stanzas. Remember that the
### Retrieve Token
This example tells the Nomad client to retrieve a Vault token. The token is
- available to the task via the canonical environment variable `VAULT_TOKEN`. The
- resulting token will have the "frontend" Vault policy attached.
+ available to the task via the canonical environment variable `VAULT_TOKEN` and
+ written to disk at `secrets/vault_token`. The resulting token will have the
+ "frontend" Vault policy attached.
```hcl
vault {

View File

@ -28,6 +28,10 @@ environment variables.
<td>`NOMAD_TASK_DIR`</td>
<td>Path to the local task directory</td>
</tr>
<tr>
<td>`NOMAD_SECRETS_DIR`</td>
<td>Path to the task's secrets directory</td>
</tr>
<tr>
<td>`NOMAD_MEMORY_LIMIT`</td>
<td>The task's memory limit in MB</td>
@ -76,6 +80,10 @@ environment variables.
<td>`NOMAD_META_<key>`</td>
<td>The metadata of the task</td>
</tr>
<tr>
<td>`VAULT_TOKEN`</td>
<td>The task's Vault token. See [Vault Integration](/docs/vault-integration/index.html) for more details</td>
</tr>
</table>
## Task Identifiers
@ -117,24 +125,28 @@ details.
### Task Directories
- Nomad makes the following two directories available to tasks:
+ Nomad makes the following directories available to tasks:
* `alloc/`: This directory is shared across all tasks in a task group and can be
used to store data that needs to be used by multiple tasks, such as a log
shipper.
* `local/`: This directory is private to each task. It can be used to store
- arbitrary data that shouldn't be shared by tasks in the task group.
+ arbitrary data that should not be shared by tasks in the task group.
* `secrets/`: This directory is private to each task, not accessible via the
`nomad fs` command or filesystem APIs and where possible backed by an
in-memory filesystem. It can be used to store secret data that should not be
visible outside the task.
- Both these directories are persisted until the allocation is removed, which
- occurs hours after all the tasks in the task group enter terminal states. This
- gives time to view the data produced by tasks.
+ These directories are persisted until the allocation is removed, which occurs
+ hours after all the tasks in the task group enter terminal states. This gives
+ time to view the data produced by tasks.
Depending on the driver and operating system being targeted, the directories are
made available in various ways. For example, on `docker` the directories are
bound to the container, while on `exec` on Linux the directories are mounted into the
chroot. Regardless of how the directories are made available, the path to the
- directories can be read through the `NOMAD_ALLOC_DIR` and `NOMAD_TASK_DIR`
- environment variables.
+ directories can be read through the `NOMAD_ALLOC_DIR`, `NOMAD_TASK_DIR`, and
+ `NOMAD_SECRETS_DIR` environment variables.
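As an illustration, a task can reference these paths through interpolation as
well; the following sketch assumes the `exec` driver and a hypothetical shell
command:

```hcl
task "logger" {
  driver = "exec"

  config {
    command = "/bin/sh"
    # Write into the shared alloc directory so sibling tasks can read it
    args = ["-c", "echo started >> ${NOMAD_ALLOC_DIR}/events.log"]
  }
}
```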
## Meta
@ -148,4 +160,5 @@ Currently there is no enforcement that the meta keys be lowercase, but using
multiple keys with the same uppercased representation will lead to undefined
behavior.
- [jobspec]: /docs/job-specification/index.html "Nomad Job Specification"
+ [jobspec]: /docs/job-specification/vault-integration/index.html "Nomad Job Specification"
[vault]: /docs/vault-integration/index.html "Nomad Vault Integration"

View File

@ -39,9 +39,6 @@ To configure a job to register with service discovery, please see the
- The service discovery feature in Nomad depends on operators making sure that
the Nomad client can reach the Consul agent.
- - Nomad assumes that it controls the life cycle of all the externally
- discoverable services running on a host.
- Tasks running inside Nomad also need to reach out to the Consul agent if
they want to use any of the Consul APIs. Ex: A task running inside a docker
container in the bridge mode won't be able to talk to a Consul Agent running

View File

@ -0,0 +1,193 @@
---
layout: "docs"
page_title: "Vault Integration"
sidebar_current: "docs-vault-integration"
description: |-
Learn how to integrate with HashiCorp Vault and retrieve Vault tokens for
tasks.
---
# Vault Integration
Many workloads require access to tokens, passwords, certificates, API keys, and
other secrets. To enable secure, auditable, and easy access to your secrets,
Nomad integrates with HashiCorp's [Vault][]. Nomad servers and clients
coordinate with Vault to derive a Vault token that has access to only the Vault
policies the task needs. Nomad clients make the token available to the task and
handle the token's renewal. Further, Nomad's [`template` block][template] can
retrieve secrets from Vault, making it easier than ever to secure your
infrastructure.
Note that in order to use Vault with Nomad, you will need to configure and
install Vault separately from Nomad. Nomad does not run Vault for you.
## Vault Configuration
To use the Vault integration, Nomad servers must be provided a Vault token. This
token can either be a root token or a token from a role. The root token is the
easiest way to get started, but we recommend a role-based token for production
installations. Nomad servers will renew the token automatically.
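As a quick sketch, the corresponding server configuration might look like the
following; the `enabled` key mirrors the `-vault-enabled` flag and the token
value is illustrative:

```hcl
vault {
  enabled = true
  address = "https://vault.service.consul:8200"

  # Root or role-derived token (illustrative value)
  token = "f02f01c2-c0d1-7cb7-6b88-8a14fada58c0"
}
```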
### Root Token
If Nomad is given a [root
token](https://www.vaultproject.io/docs/concepts/tokens.html#root-tokens), no
further configuration is needed as Nomad can derive a token for jobs using any
Vault policies.
### Role-based Token
Vault's [Token Authentication Backend][auth] supports a concept called "roles".
Roles allow policies to be grouped together and token creation to be delegated
to a trusted service such as Nomad. By creating a role, the set of policies that
tasks managed by Nomad can access may be limited compared to giving Nomad a root
token.
When given a non-root token, Nomad queries the token to determine the role it
was generated from. It will then derive tokens for jobs based on that role.
Nomad expects the role to be created with the properties described below via the
Vault endpoint `/auth/token/roles/<role_name>`:
```json
{
"allowed_policies": "<comma-seperated list of policies>",
"explicit_max_ttl": 0,
"name": "nomad",
"orphan": false,
"period": 259200,
"renewable": true
}
```
#### Parameters:
* `allowed_policies` - Specifies the list of allowed policies as a
comma-separated string. This list should contain all policies that jobs running
under Nomad should have access to. Further, the list must contain one or more
policies that give Nomad the following permissions:
```hcl
# Allow creating tokens under the role
path "auth/token/create/nomad-server" {
capabilities = ["create", "update"]
}
# Allow looking up the role
path "auth/token/roles/nomad-server" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to
# access the tokens they are requesting
path "auth/token/lookup/*" {
capabilities = ["read"]
}
# Allow revoking tokens that should no longer exist
path "/auth/token/revoke-accessor/*" {
capabilities = ["update"]
}
```
* `explicit_max_ttl` - Specifies the max TTL of a token. Must be set to `0` to
allow periodic tokens.
* `name` - Specifies the name of the role. We recommend using the name
`nomad-server`. If a different name is chosen, replace the role in the above
policy.
* `orphan` - Specifies whether tokens created against this role will be
orphaned and have no parents. Must be set to `false`. This ensures that the
token can be revoked when the task is no longer needed or a node dies.
* `period` - Specifies the length, in seconds, by which the TTL is extended on
each renewal. Setting this value on the order of 3 days (259200 seconds) is
suggested to avoid a large renewal request rate against Vault. Must be set to a
positive value.
* `renewable` - Specifies whether created tokens are renewable. Must be set to
`true`. This allows Nomad to renew tokens for tasks.
See Vault's [Token Authentication Backend][auth] documentation for all possible
fields and more complete documentation.
#### Example Configuration
To make getting started easy, the basic [`nomad-server`
policy](/data/vault/nomad-server-policy.hcl) and
[role](/data/vault/nomad-server-role.json) described above are available.
The below example assumes Vault is accessible, unsealed, and the operator has
appropriate permissions.
```
# Download the policy and role
$ curl https://nomadproject.io/data/vault/nomad-server-policy.hcl -O -s
$ curl https://nomadproject.io/data/vault/nomad-server-role.json -O -s
# Write the policy to Vault
$ vault policy-write nomad-server nomad-server-policy.hcl
# Edit the role to add any policies that you would like to be accessible to
# Nomad jobs in the list of allowed_policies. Do not remove `nomad-server`.
$ editor nomad-server-role.json
# Create the role with Vault
$ vault write /auth/token/roles/nomad-server @nomad-server-role.json
```
#### Retrieving the Role-based Token
After the role is created, a token suitable for the Nomad servers may be
retrieved by issuing the following Vault command:
```
$ vault token-create -role nomad-server
Key Value
--- -----
token f02f01c2-c0d1-7cb7-6b88-8a14fada58c0
token_accessor 8cb7fcb3-9a4f-6fbf-0efc-83092bb0cb1c
token_duration 259200s
token_renewable true
token_policies [<policies>]
```
The token can then be set in the server configuration's [vault block][config],
as a command-line flag, or via an environment variable.
```
$ nomad agent -config /path/to/config -vault-token=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0
```
```
$ VAULT_TOKEN=f02f01c2-c0d1-7cb7-6b88-8a14fada58c0 nomad agent -config /path/to/config
```
## Agent Configuration
To enable Vault integration, please see the [Nomad agent Vault
integration][config] configuration.
## Vault Definition Syntax
To configure a job to retrieve Vault tokens, please see the [`vault` job
specification documentation][vault-spec].
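For orientation, a task's `vault` stanza might look like the following sketch;
it assumes the `policies` parameter described in the job specification, with an
illustrative policy name:

```hcl
task "web" {
  # Request a Vault token with the "frontend" policy attached
  vault {
    policies = ["frontend"]
  }
}
```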
## Troubleshooting
Upon startup, Nomad will attempt to connect to the specified Vault server. Nomad
will look up the passed token and, if the token is from a role, the role will be
validated. Nomad will not shut down if given an invalid Vault token, but it will
log the reasons the token is invalid and disable Vault integration.
## Assumptions
- Vault 0.6.2 or later is needed.
- Nomad is given either a root token or a token created from an appropriate role.
[auth]: https://www.vaultproject.io/docs/auth/token.html "Vault Authentication Backend"
[config]: /docs/agent/config.html#vault-options "Nomad Vault configuration block"
[template]: /docs/job-specification/template.html "Nomad template Job Specification"
[vault]: https://www.vaultproject.io/ "Vault by HashiCorp"
[vault-spec]: /docs/job-specification/vault.html "Nomad Vault Job Specification"

View File

@ -100,6 +100,10 @@
<a href="/docs/service-discovery/index.html">Service Discovery</a>
</li>
<li<%= sidebar_current("docs-vault-integration") %>>
<a href="/docs/vault-integration/index.html">Vault Integration</a>
</li>
<li<%= sidebar_current("docs-operating-a-job") %>>
<a href="/docs/operating-a-job/index.html">Operating a Job</a>
<ul class="nav">