Merge pull request #4617 from hashicorp/docs-reord

Rebased docs reorg
This commit is contained in:
Preetha 2018-08-27 07:21:31 -07:00 committed by GitHub
commit c6e3924758
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
97 changed files with 1052 additions and 811 deletions

View File

@ -40,7 +40,7 @@
/community.html /resources.html
# Docs
/docs/agent/config.html /docs/agent/configuration/index.html
/docs/agent/config.html /docs/configuration/index.html
/docs/jobops /guides/operating-a-job/index.html
/docs/jobops/ /guides/operating-a-job/index.html
/docs/jobops/index.html /guides/operating-a-job/index.html
@ -49,20 +49,20 @@
/docs/jobops/resources.html /guides/operating-a-job/resource-utilization.html
/docs/jobops/logs.html /guides/operating-a-job/accessing-logs.html
/docs/jobops/updating.html /guides/operating-a-job/update-strategies/index.html
/docs/jobops/servicediscovery.html /docs/service-discovery/index.html
/docs/jobops/servicediscovery.html /guides/operations/consul-integration/index.html
/docs/jobspec /docs/job-specification/index.html
/docs/jobspec/ /docs/job-specification/index.html
/docs/jobspec/index.html /docs/job-specification/index.html
/docs/jobspec/interpreted.html /docs/runtime/interpolation.html
/docs/jobspec/json.html /api/json-jobs.html
/docs/jobspec/environment.html /docs/runtime/environment.html
/docs/jobspec/schedulers.html /docs/runtime/schedulers.html
/docs/jobspec/schedulers.html /docs/schedulers.html
/docs/jobspec/servicediscovery.html /docs/job-specification/service.html
/docs/jobspec/networking.html /docs/job-specification/network.html
/docs/cluster/automatic.html /guides/cluster/automatic.html
/docs/cluster/manual.html /guides/cluster/manual.html
/docs/cluster/federation.html /guides/cluster/federation.html
/docs/cluster/requirements.html /guides/cluster/requirements.html
/docs/cluster/automatic.html /guides/operations/cluster/automatic.html
/docs/cluster/manual.html /guides/operations/cluster/manual.html
/docs/cluster/federation.html /guides/operations/federation.html
/docs/cluster/requirements.html /guides/operations/requirements.html
/docs/commands/operator-index.html /docs/commands/operator.html
/docs/commands/operator-raft-list-peers.html /docs/commands/operator/raft-list-peers.html
/docs/commands/operator-raft-remove-peer.html /docs/commands/operator/raft-remove-peer.html
@ -84,6 +84,7 @@
/docs/commands/server-force-leave.html /docs/commands/server/force-leave.html
/docs/commands/server-join.html /docs/commands/server/join.html
/docs/commands/server-members.html /docs/commands/server/members.html
/docs/runtime/schedulers.html /docs/schedulers.html
# Moved /docs/operating-a-job/ -> /guides/operating-a-job/
/docs/operating-a-job /guides/operating-a-job/index.html
@ -109,6 +110,42 @@
/docs/operating-a-job/update-strategies/handling-signals.html /guides/operating-a-job/update-strategies/handling-signals.html
/docs/operating-a-job/update-strategies/rolling-upgrades.html /guides/operating-a-job/update-strategies/rolling-upgrades.html
# Moved /docs/agent/configuration/ -> /docs/configuration/
/docs/agent/configuration /docs/configuration/index.html
/docs/agent/configuration/ /docs/configuration/index.html
/docs/agent/configuration/index.html /docs/configuration/index.html
/docs/agent/configuration/acl.html /docs/configuration/acl.html
/docs/agent/configuration/autopilot.html /docs/configuration/autopilot.html
/docs/agent/configuration/client.html /docs/configuration/client.html
/docs/agent/configuration/consul.html /docs/configuration/consul.html
/docs/agent/configuration/sentinel.html /docs/configuration/sentinel.html
/docs/agent/configuration/server.html /docs/configuration/server.html
/docs/agent/configuration/server_join.html /docs/configuration/server_join.html
/docs/agent/configuration/telemetry.html /docs/configuration/telemetry.html
/docs/agent/configuration/tls.html /docs/configuration/tls.html
/docs/agent/configuration/vault.html /docs/configuration/vault.html
# Moved guide-like docs to /guides
/docs/agent /guides/operations/agent/index.html
/docs/agent/ /guides/operations/agent/index.html
/docs/agent/index.html /guides/operations/agent/index.html
/docs/agent/cloud_auto_join.html /guides/operations/cluster/cloud_auto_join.html
/docs/agent/telemetry.html /guides/operations/monitoring/telemetry.html
/docs/agent/encryption.html /guides/security/encryption.html
/docs/install /guides/operations/install/index.html
/docs/install/ /guides/operations/install/index.html
/docs/install/index.html /guides/operations/install/index.html
/docs/upgrade /guides/operations/upgrade/index.html
/docs/upgrade/ /guides/operations/upgrade/index.html
/docs/upgrade/index.html /guides/operations/upgrade/index.html
/docs/upgrade/upgrade-specific.html /guides/operations/upgrade/upgrade-specific.html
/docs/service-discovery /guides/operations/consul-integration/index.html
/docs/service-discovery/ /guides/operations/consul-integration/index.html
/docs/service-discovery/index.html /guides/operations/consul-integration/index.html
/docs/vault-integration /guides/operations/vault-integration/index.html
/docs/vault-integration/ /guides/operations/vault-integration/index.html
/docs/vault-integration/index.html /guides/operations/vault-integration/index.html
# API
/docs/http/index.html /api/index.html
@ -133,3 +170,24 @@
/docs/http/status.html /api/status.html
/docs/http/operator.html /api/operator.html
/docs/http/system.html /api/system.html
# Guides
# Reorganized Guides by Persona
/guides/autopilot.html /guides/operations/autopilot.html
/guides/cluster/automatic.html /guides/operations/cluster/automatic.html
/guides/cluster/bootstrapping.html /guides/operations/cluster/bootstrapping.html
/guides/cluster/manual.html /guides/operations/cluster/manual.html
/guides/cluster/federation /guides/operations/federation
/guides/cluster/requirements.html /guides/operations/requirements.html
/guides/nomad-metrics.html /guides/operations/monitoring/nomad-metrics.html
/guides/node-draining.html /guides/operations/node-draining.html
/guides/outage.html /guides/operations/outage.html
/guides/acl.html /guides/security/acl.html
/guides/namespaces.html /guides/security/namespaces.html
/guides/quotas.html /guides/security/quotas.html
/guides/securing-nomad.html /guides/security/securing-nomad.html
/guides/sentinel-policy.html /guides/security/sentinel-policy.html
/guides/sentinel/job.html /guides/security/sentinel/job.html

View File

@ -9,7 +9,7 @@ description: |-
# ACL Policies HTTP API
The `/acl/policies` and `/acl/policy/` endpoints are used to manage ACL policies.
For more details about ACLs, please see the [ACL Guide](/guides/acl.html).
For more details about ACLs, please see the [ACL Guide](/guides/security/acl.html).
## List Policies

View File

@ -9,13 +9,13 @@ description: |-
# ACL Tokens HTTP API
The `/acl/bootstrap`, `/acl/tokens`, and `/acl/token/` endpoints are used to manage ACL tokens.
For more details about ACLs, please see the [ACL Guide](/guides/acl.html).
For more details about ACLs, please see the [ACL Guide](/guides/security/acl.html).
## Bootstrap Token
This endpoint is used to bootstrap the ACL system and provide the initial management token.
This request is always forwarded to the authoritative region. It can only be invoked once
until a [bootstrap reset](/guides/acl.html#reseting-acl-bootstrap) is performed.
until a [bootstrap reset](/guides/security/acl.html#reseting-acl-bootstrap) is performed.
| Method | Path | Produces |
| ------ | ---------------------------- | -------------------------- |

View File

@ -75,7 +75,7 @@ administration.
Several endpoints in Nomad use or require ACL tokens to operate. The tokens are used to authenticate the request and determine if the request is allowed based on the associated authorizations. Tokens are specified per-request by using the `X-Nomad-Token` request header set to the `SecretID` of an ACL Token.
For more details about ACLs, please see the [ACL Guide](/guides/acl.html).
For more details about ACLs, please see the [ACL Guide](/guides/security/acl.html).
## Authentication

View File

@ -197,7 +197,7 @@ The `Job` object supports the following keys:
- `Type` - Specifies the job type and switches which scheduler
is used. Nomad provides the `service`, `system` and `batch` schedulers,
and defaults to `service`. To learn more about each scheduler type visit
[here](/docs/runtime/schedulers.html)
[here](/docs/schedulers.html)
- `Update` - Specifies an update strategy to be applied to all task groups
within the job. When specified both at the job level and the task group level,
@ -366,7 +366,7 @@ The `Task` object supports the following keys:
Consul for service discovery. A `Service` object represents a routable and
discoverable service on the network. Nomad automatically registers when a task
is started and de-registers it when the task transitions to the dead state.
[Click here](/docs/service-discovery/index.html) to learn more about
[Click here](/guides/operations/consul-integration/index.html#service-discovery) to learn more about
services. Below are the fields in the `Service` object:
- `Name`: An explicit name for the Service. Nomad will replace `${JOB}`,

View File

@ -759,8 +759,8 @@ $ curl \
This endpoint toggles the drain mode of the node. When draining is enabled, no
further allocations will be assigned to this node, and existing allocations will
be migrated to new nodes. See the [Decommissioning Nodes
guide](/guides/node-draining.html) for suggested usage.
be migrated to new nodes. See the [Workload Migration
Guide](/guides/operations/node-draining.html) for suggested usage.
| Method | Path | Produces |
| ------- | ------------------------- | -------------------------- |

View File

@ -14,7 +14,7 @@ as interacting with the Raft subsystem.
~> Use this interface with extreme caution, as improper use could lead to a
Nomad outage and even loss of data.
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
See the [Outage Recovery](/guides/operations/outage.html) guide for some examples of how
these capabilities are used. For a CLI to perform these operations manually,
please see the documentation for the
[`nomad operator`](/docs/commands/operator.html) command.
@ -164,7 +164,7 @@ $ curl \
```
For more information about the Autopilot configuration options, see the
[agent configuration section](/docs/agent/configuration/autopilot.html).
[agent configuration section](/docs/configuration/autopilot.html).
## Update Autopilot Configuration

View File

@ -9,9 +9,9 @@ description: |-
# Sentinel Policies HTTP API
The `/sentinel/policies` and `/sentinel/policy/` endpoints are used to manage Sentinel policies.
For more details about Sentinel policies, please see the [Sentinel Policy Guide](/guides/sentinel-policy.html).
For more details about Sentinel policies, please see the [Sentinel Policy Guide](/guides/security/sentinel-policy.html).
Sentinel endpoints are only available when ACLs are enabled. For more details about ACLs, please see the [ACL Guide](/guides/acl.html).
Sentinel endpoints are only available when ACLs are enabled. For more details about ACLs, please see the [ACL Guide](/guides/security/acl.html).
~> **Enterprise Only!** This API endpoint and functionality only exists in
Nomad Enterprise. This is not present in the open source version of Nomad.

View File

@ -1,131 +0,0 @@
---
layout: "docs"
page_title: "server_join Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration--server-join"
description: |-
The "server_join" stanza specifies how the Nomad agent will discover and connect to Nomad servers.
---
# `server_join` Stanza
<table class="table table-bordered table-striped">
<tr>
<th width="120">Placement</th>
<td>
<code>server -> **server_join**</code>
<br>
<code>client -> **server_join**</code>
</td>
</tr>
</table>
The `server_join` stanza specifies how the Nomad agent will discover and connect
to Nomad servers.
```hcl
server_join {
retry_join = [ "1.1.1.1", "2.2.2.2" ]
retry_max = 3
retry_interval = "15s"
}
```
## `server_join` Parameters
- `retry_join` `(array<string>: [])` - Specifies a list of server addresses to
join. This is similar to [`start_join`](#start_join), but will continue to
be attempted even if the initial join attempt fails, up to
[retry_max](#retry_max). Further, `retry_join` is available to
both Nomad servers and clients, while `start_join` is only defined for Nomad
servers. This is useful for cases where we know the address will become
available eventually. Use `retry_join` with an array as a replacement for
`start_join`, **do not use both options**.
Address format includes both using IP addresses as well as an interface to the
[go-discover](https://github.com/hashicorp/go-discover) library for doing
automated cluster joining using cloud metadata. See [Cloud
Auto-join][cloud_auto_join] for more information.
```
server_join {
retry_join = [ "1.1.1.1", "2.2.2.2" ]
}
```
Using the `go-discover` interface, this can be defined both in a client or
server configuration as well as provided as a command-line argument.
```
server_join {
retry_join = [ "provider=aws tag_key=..." ]
}
```
See the [server address format](#server-address-format) for more information
about expected server address formats.
- `retry_interval` `(string: "30s")` - Specifies the time to wait between retry
join attempts.
- `retry_max` `(int: 0)` - Specifies the maximum number of join attempts to be
made before exiting with a return code of 1. By default, this is set to 0
which is interpreted as infinite retries.
- `start_join` `(array<string>: [])` - Specifies a list of server addresses to
join on startup. If Nomad is unable to join with any of the specified
addresses, agent startup will fail. See the
[server address format](#server-address-format) section for more information
on the format of the string. This field is defined only for Nomad servers and
will result in a configuration parse error if included in a client
configuration.
## Server Address Format
This section describes the acceptable syntax and format for describing the
location of a Nomad server. There are many ways to reference a Nomad server,
including directly by IP address and resolving through DNS.
### Directly via IP Address
It is possible to address another Nomad server using its IP address. This is
done in the `ip:port` format, such as:
```
1.2.3.4:5678
```
If the port option is omitted, it defaults to the Serf port, which is 4648
unless configured otherwise:
```
1.2.3.4 => 1.2.3.4:4648
```
### Via Domains or DNS
It is possible to address another Nomad server using its DNS address. This is
done in the `address:port` format, such as:
```
nomad-01.company.local:5678
```
If the port option is omitted, it defaults to the Serf port, which is 4648
unless configured otherwise:
```
nomad-01.company.local => nomad-01.company.local:4648
```
### Via the go-discover interface
As of Nomad 0.8.4, `retry_join` accepts a unified interface using the
[go-discover](https://github.com/hashicorp/go-discover) library for doing
automated cluster joining using cloud metadata. See [Cloud
Auto-join][cloud_auto_join] for more information.
```
"provider=aws tag_key=..." => 1.2.3.4:4648
```
[cloud_auto_join]: /docs/agent/cloud_auto_join.html "Nomad Cloud Auto-join"

View File

@ -14,7 +14,8 @@ or server functionality, including exposing interfaces for client consumption
and running jobs.
Due to the power and flexibility of this command, the Nomad agent is documented
in its own section. See the [Nomad Agent](/docs/agent/index.html) section for
in its own section. See the [Nomad Agent](/guides/operations/agent/index.html)
guide and the [Configuration](/docs/configuration/index.html) documentation section for
more information on how to use this command and the options it has.
## Command-line Options
@ -24,39 +25,38 @@ via CLI arguments. The `agent` command accepts the following arguments:
* `-alloc-dir=<path>`: Equivalent to the Client [alloc_dir](#alloc_dir) config
option.
* `-acl-enabled`: Equivalent to the ACL [enabled](/docs/agent/configuration/acl.html#enabled) config option.
* `-acl-replication-token`: Equivalent to the ACL [replication_token](/docs/agent/configuration/acl.html#replication_token) config option.
* `-acl-enabled`: Equivalent to the ACL [enabled](/docs/configuration/acl.html#enabled) config option.
* `-acl-replication-token`: Equivalent to the ACL [replication_token](/docs/configuration/acl.html#replication_token) config option.
* `-bind=<address>`: Equivalent to the [bind_addr](#bind_addr) config option.
* `-bootstrap-expect=<num>`: Equivalent to the
[bootstrap_expect](#bootstrap_expect) config option.
* `-client`: Enable client mode on the local agent.
* `-config=<path>`: Specifies the path to a configuration file or a directory of
configuration files to load. Can be specified multiple times.
* `-consul-address=<addr>`: Equivalent to the [address](/docs/agent/configuration/consul.html#address) config option.
* `-consul-auth=<auth>`: Equivalent to the [auth](/docs/agent/configuration/consul.html#auth) config option.
* `-consul-auto-advertise`: Equivalent to the [auto_advertise](/docs/agent/configuration/consul.html#auto_advertise) config option.
* `-consul-ca-file=<path>`: Equivalent to the [ca_file](/docs/agent/configuration/consul.html#ca_file) config option.
* `-consul-cert-file=<path>`: Equivalent to the [cert_file](/docs/agent/configuration/consul.html#cert_file) config option.
* `-consul-checks-use-advertise`: Equivalent to the [checks_use_advertise](/docs/agent/configuration/consul.html#checks_use_advertise) config option.
* `-consul-client-auto-join`: Equivalent to the [client_auto_join](/docs/agent/configuration/consul.html#client_auto_join) config option.
* `-consul-client-service-name=<name>`: Equivalent to the [client_service_name](/docs/agent/configuration/consul.html#client_service_name) config option.
* `-consul-client-http-check-name=<name>`: Equivalent to the [client_http_check_name](/docs/agent/configuration/consul.html#client_http_check_name) config option.
* `-consul-key-file=<path>`: Equivalent to the [key_file](/docs/agent/configuration/consul.html#key_file) config option.
* `-consul-server-service-name=<name>`: Equivalent to the [server_service_name](/docs/agent/configuration/consul.html#server_service_name) config option.
* `-consul-server-http-check-name=<name>`: Equivalent to the [server_http_check_name](/docs/agent/configuration/consul.html#server_http_check_name) config option.
* `-consul-server-serf-check-name=<name>`: Equivalent to the [server_serf_check_name](/docs/agent/configuration/consul.html#server_serf_check_name) config option.
* `-consul-server-rpc-check-name=<name>`: Equivalent to the [server_rpc_check_name](/docs/agent/configuration/consul.html#server_rpc_check_name) config option.
* `-consul-server-auto-join`: Equivalent to the [server_auto_join](/docs/agent/configuration/consul.html#server_auto_join) config option.
* `-consul-ssl`: Equivalent to the [ssl](/docs/agent/configuration/consul.html#ssl) config option.
* `-consul-token=<token>`: Equivalent to the [token](/docs/agent/configuration/consul.html#token) config option.
* `-consul-verify-ssl`: Equivalent to the [verify_ssl](/docs/agent/configuration/consul.html#verify_ssl) config option.
* `-consul-address=<addr>`: Equivalent to the [address](/docs/configuration/consul.html#address) config option.
* `-consul-auth=<auth>`: Equivalent to the [auth](/docs/configuration/consul.html#auth) config option.
* `-consul-auto-advertise`: Equivalent to the [auto_advertise](/docs/configuration/consul.html#auto_advertise) config option.
* `-consul-ca-file=<path>`: Equivalent to the [ca_file](/docs/configuration/consul.html#ca_file) config option.
* `-consul-cert-file=<path>`: Equivalent to the [cert_file](/docs/configuration/consul.html#cert_file) config option.
* `-consul-checks-use-advertise`: Equivalent to the [checks_use_advertise](/docs/configuration/consul.html#checks_use_advertise) config option.
* `-consul-client-auto-join`: Equivalent to the [client_auto_join](/docs/configuration/consul.html#client_auto_join) config option.
* `-consul-client-service-name=<name>`: Equivalent to the [client_service_name](/docs/configuration/consul.html#client_service_name) config option.
* `-consul-client-http-check-name=<name>`: Equivalent to the [client_http_check_name](/docs/configuration/consul.html#client_http_check_name) config option.
* `-consul-key-file=<path>`: Equivalent to the [key_file](/docs/configuration/consul.html#key_file) config option.
* `-consul-server-service-name=<name>`: Equivalent to the [server_service_name](/docs/configuration/consul.html#server_service_name) config option.
* `-consul-server-http-check-name=<name>`: Equivalent to the [server_http_check_name](/docs/configuration/consul.html#server_http_check_name) config option.
* `-consul-server-serf-check-name=<name>`: Equivalent to the [server_serf_check_name](/docs/configuration/consul.html#server_serf_check_name) config option.
* `-consul-server-rpc-check-name=<name>`: Equivalent to the [server_rpc_check_name](/docs/configuration/consul.html#server_rpc_check_name) config option.
* `-consul-server-auto-join`: Equivalent to the [server_auto_join](/docs/configuration/consul.html#server_auto_join) config option.
* `-consul-ssl`: Equivalent to the [ssl](/docs/configuration/consul.html#ssl) config option.
* `-consul-token=<token>`: Equivalent to the [token](/docs/configuration/consul.html#token) config option.
* `-consul-verify-ssl`: Equivalent to the [verify_ssl](/docs/configuration/consul.html#verify_ssl) config option.
* `-data-dir=<path>`: Equivalent to the [data_dir](#data_dir) config option.
* `-dc=<datacenter>`: Equivalent to the [datacenter](#datacenter) config option.
* `-dev`: Start the agent in development mode. This enables a pre-configured
dual-role agent (client + server) which is useful for developing or testing
Nomad. No other configuration is required to start the agent in this mode.
* `-encrypt`: Set the Serf encryption key. See [Agent
Encryption](/docs/agent/encryption.html) for more details.
* `-encrypt`: Set the Serf encryption key. See the [Encryption Overview](/guides/security/encryption.html) for more details.
* `-join=<address>`: Address of another agent to join upon starting up. This can
be specified multiple times to specify multiple agents to join.
* `-log-level=<level>`: Equivalent to the [log_level](#log_level) config option.

View File

@ -30,7 +30,7 @@ description below for specific usage information and requirements.
* `-servers`: List the client's known servers. Client nodes do not participate
in the gossip pool, and instead register with these servers periodically over
the network. The initial value of this list may come from configuration files
using the [`servers`](/docs/agent/configuration/client.html#servers)
using the [`servers`](/docs/configuration/client.html#servers)
configuration option in the client block.
* `-update-servers`: Updates the client's server list using the provided

View File

@ -28,7 +28,7 @@ placed on another node about to be drained.
The [node status](/docs/commands/node/status.html) command complements this
nicely by providing the current drain status of a given node.
See the [Decommissioning Nodes guide](/guides/node-draining.html) for detailed
See the [Workload Migration guide](/guides/operations/node-draining.html) for detailed
examples of node draining.
## Usage

View File

@ -14,9 +14,9 @@ as interacting with the Raft subsystem. This was added in Nomad 0.5.5.
~> Use this command with extreme caution, as improper use could lead to a Nomad
outage and even loss of data.
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
See the [Outage Recovery](/guides/operations/outage.html) guide for some examples of how
this command is used. For an API to perform these operations programmatically,
please see the documentation for the [Operator](/guides/outage.html)
please see the documentation for the [Operator](/api/operator.html)
endpoint.
## Usage

View File

@ -9,7 +9,7 @@ description: >
# Command: operator autopilot get-config
The Autopilot operator command is used to view the current Autopilot configuration. See the
[Autopilot Guide](/guides/autopilot.html) for more information about Autopilot.
[Autopilot Guide](/guides/operations/autopilot.html) for more information about Autopilot.
## Usage

View File

@ -9,7 +9,7 @@ description: >
# Command: operator autopilot set-config
The Autopilot operator command is used to set the current Autopilot configuration. See the
[Autopilot Guide](/guides/autopilot.html) for more information about Autopilot.
[Autopilot Guide](/guides/operations/autopilot.html) for more information about Autopilot.
## Usage
@ -41,11 +41,11 @@ running Raft protocol version 3 or higher. Must be a duration value such as `10s
new servers until it can perform a migration. Must be one of `[true|false]`.
* `-redundancy-zone-tag`- (Enterprise-only) Controls the
[`redundancy_zone`](/docs/agent/configuration/server.html#redundancy_zone)
[`redundancy_zone`](/docs/configuration/server.html#redundancy_zone)
used for separating servers into different redundancy zones.
* `-upgrade-version-tag` - (Enterprise-only) Controls the
[`upgrade_version`](/docs/agent/configuration/server.html#upgrade_version) to
[`upgrade_version`](/docs/configuration/server.html#upgrade_version) to
use for version info when performing upgrade migrations. If left blank, the
Nomad version will be used.

View File

@ -11,7 +11,7 @@ description: >
The Raft list-peers command is used to display the current Raft peer
configuration.
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
See the [Outage Recovery](/guides/operations/outage.html) guide for some examples of how
this command is used. For an API to perform these operations programmatically,
please see the documentation for the [Operator](/api/operator.html)
endpoint.

View File

@ -19,7 +19,7 @@ to clean up by simply running [`nomad
server force-leave`](/docs/commands/server/force-leave.html) instead of this
command.
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
See the [Outage Recovery](/guides/operations/outage.html) guide for some examples of how
this command is used. For an API to perform these operations programmatically,
please see the documentation for the [Operator](/api/operator.html)
endpoint.

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "acl Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-acl"
sidebar_current: "docs-configuration-acl"
description: |-
The "acl" stanza configures the Nomad agent to enable ACLs and tune various parameters.
---

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "autopilot Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-autopilot"
sidebar_current: "docs-configuration-autopilot"
description: |-
The "autopilot" stanza configures the Nomad agent to configure Autopilot behavior.
---
@ -18,7 +18,7 @@ description: |-
</table>
The `autopilot` stanza configures the Nomad agent to configure Autopilot behavior.
For more information about Autopilot, see the [Autopilot Guide](/guides/autopilot.html).
For more information about Autopilot, see the [Autopilot Guide](/guides/operations/autopilot.html).
```hcl
autopilot {
@ -51,7 +51,7 @@ autopilot {
- `enable_redundancy_zones` `(bool: false)` - (Enterprise-only) Controls whether
Autopilot separates servers into zones for redundancy, in conjunction with the
[redundancy_zone](/docs/agent/configuration/server.html#redundancy_zone) parameter.
[redundancy_zone](/docs/configuration/server.html#redundancy_zone) parameter.
Only one server in each zone can be a voting member at one time.
- `disable_upgrade_migration` `(bool: false)` - (Enterprise-only) Disables Autopilot's
@ -61,5 +61,5 @@ autopilot {
- `enable_custom_upgrades` `(bool: false)` - (Enterprise-only) Specifies whether to
enable using custom upgrade versions when performing migrations, in conjunction with
the [upgrade_version](/docs/agent/configuration/server.html#upgrade_version) parameter.
the [upgrade_version](/docs/configuration/server.html#upgrade_version) parameter.

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "client Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-client"
sidebar_current: "docs-configuration-client"
description: |-
The "client" stanza configures the Nomad agent to accept jobs as assigned by
the Nomad server, join the cluster, and specify driver-specific configuration.
@ -32,7 +32,7 @@ client {
- `alloc_dir` `(string: "[data_dir]/alloc")` - Specifies the directory to use
for allocation data. By default, this is the top-level
[data_dir](/docs/agent/configuration/index.html#data_dir) suffixed with
[data_dir](/docs/configuration/index.html#data_dir) suffixed with
"alloc", like `"/opt/nomad/alloc"`. This must be an absolute path
- `chroot_env` <code>([ChrootEnv](#chroot_env-parameters): nil)</code> -
@ -98,7 +98,7 @@ client {
- `state_dir` `(string: "[data_dir]/client")` - Specifies the directory to use
to store client state. By default, this is the top-level
[data_dir](/docs/agent/configuration/index.html#data_dir) suffixed with
[data_dir](/docs/configuration/index.html#data_dir) suffixed with
"client", like `"/opt/nomad/client"`. This must be an absolute path.
- `gc_interval` `(string: "1m")` - Specifies the interval at which Nomad
@ -356,4 +356,4 @@ client {
}
}
```
[server-join]: /docs/agent/configuration/server_join.html "Server Join"
[server-join]: /docs/configuration/server_join.html "Server Join"

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "consul Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-consul"
sidebar_current: "docs-configuration-consul"
description: |-
The "consul" stanza configures the Nomad agent's communication with
Consul for service discovery and key-value integration. When
@ -168,4 +168,4 @@ consul {
```
[consul]: https://www.consul.io/ "Consul by HashiCorp"
[bootstrap]: /guides/cluster/automatic.html "Automatic Bootstrapping"
[bootstrap]: /guides/operations/cluster/automatic.html "Automatic Bootstrapping"

View File

@ -1,12 +1,12 @@
---
layout: "docs"
page_title: "Agent Configuration"
sidebar_current: "docs-agent-configuration"
sidebar_current: "docs-configuration"
description: |-
Learn about the configuration options available for the Nomad agent.
---
# Agent Configuration
# Nomad Configuration
Nomad agents have a variety of parameters that can be specified via
configuration files or command-line flags. Configuration files are written in
@ -236,10 +236,10 @@ http_api_response_headers {
[hcl]: https://github.com/hashicorp/hcl "HashiCorp Configuration Language"
[go-sockaddr/template]: https://godoc.org/github.com/hashicorp/go-sockaddr/template
[consul]: /docs/agent/configuration/consul.html "Nomad Agent consul Configuration"
[vault]: /docs/agent/configuration/vault.html "Nomad Agent vault Configuration"
[tls]: /docs/agent/configuration/tls.html "Nomad Agent tls Configuration"
[client]: /docs/agent/configuration/client.html "Nomad Agent client Configuration"
[sentinel]: /docs/agent/configuration/sentinel.html "Nomad Agent sentinel Configuration"
[server]: /docs/agent/configuration/server.html "Nomad Agent server Configuration"
[acl]: /docs/agent/configuration/acl.html "Nomad Agent ACL Configuration"
[consul]: /docs/configuration/consul.html "Nomad Agent consul Configuration"
[vault]: /docs/configuration/vault.html "Nomad Agent vault Configuration"
[tls]: /docs/configuration/tls.html "Nomad Agent tls Configuration"
[client]: /docs/configuration/client.html "Nomad Agent client Configuration"
[sentinel]: /docs/configuration/sentinel.html "Nomad Agent sentinel Configuration"
[server]: /docs/configuration/server.html "Nomad Agent server Configuration"
[acl]: /docs/configuration/acl.html "Nomad Agent ACL Configuration"

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "sentinel Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-sentinel"
sidebar_current: "docs-configuration-sentinel"
description: |-
The "sentinel" stanza configures the Nomad agent for Sentinel policies and tune various parameters.
---

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "server Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-server"
sidebar_current: "docs-configuration-server"
description: |-
The "server" stanza configures the Nomad agent to operate in server mode to
participate in scheduling decisions, register with service discovery, handle
@ -51,7 +51,7 @@ server {
- `data_dir` `(string: "[data_dir]/server")` - Specifies the directory to use -
for server-specific data, including the replicated log. By default, this is -
the top-level [data_dir](/docs/agent/configuration/index.html#data_dir)
the top-level [data_dir](/docs/configuration/index.html#data_dir)
suffixed with "server", like `"/opt/nomad/server"`. This must be an absolute
path.
@ -70,7 +70,7 @@ server {
provided once on each agent's initial startup sequence. If it is provided
after Nomad has been initialized with an encryption key, then the provided key
is ignored and a warning will be displayed. See the
[Nomad encryption documentation][encryption] for more details on this option
[encryption documentation][encryption] for more details on this option
and its impact on the cluster.
- `node_gc_threshold` `(string: "24h")` - Specifies how long a node must be in a
@ -127,7 +127,7 @@ server {
- `redundancy_zone` `(string: "")` - (Enterprise-only) Specifies the redundancy
zone that this server will be a part of for Autopilot management. For more
information, see the [Autopilot Guide](/guides/autopilot.html).
information, see the [Autopilot Guide](/guides/operations/autopilot.html).
- `rejoin_after_leave` `(bool: false)` - Specifies if Nomad will ignore a
previous leave and attempt to rejoin the cluster when starting. By default,
@ -142,7 +142,7 @@ server {
- `upgrade_version` `(string: "")` - A custom version of the format X.Y.Z to use
in place of the Nomad version when custom upgrades are enabled in Autopilot.
For more information, see the [Autopilot Guide](/guides/autopilot.html).
For more information, see the [Autopilot Guide](/guides/operations/autopilot.html).
### Deprecated Parameters
@ -169,7 +169,7 @@ server {
- `start_join` `(array<string>: [])` - Specifies a list of server addresses to
join on startup. If Nomad is unable to join with any of the specified
addresses, agent startup will fail. See the [server address
format](/docs/agent/configuration/server_join.html#server-address-format)
format](/docs/configuration/server_join.html#server-address-format)
section for more information on the format of the string. This field is
deprecated in favor of the [server_join stanza][server-join].
@ -203,7 +203,7 @@ server {
The Nomad servers can automatically bootstrap if Consul is configured. For a
more detailed explanation, please see the
[automatic Nomad bootstrapping documentation](/guides/cluster/automatic.html).
[automatic Nomad bootstrapping documentation](/guides/operations/cluster/automatic.html).
### Restricting Schedulers
@ -218,5 +218,5 @@ server {
}
```
[encryption]: /docs/agent/encryption.html "Nomad Agent Encryption"
[server-join]: /docs/agent/configuration/server_join.html "Server Join"
[encryption]: /guides/security/encryption.html "Nomad Encryption Overview"
[server-join]: /docs/configuration/server_join.html "Server Join"

View File

@ -1,42 +1,138 @@
---
layout: "docs"
page_title: "Cloud Auto-join"
sidebar_current: "docs-agent-cloud-auto-join"
page_title: "server_join Stanza - Agent Configuration"
sidebar_current: "docs-configuration-server-join"
description: |-
Nomad supports automatic cluster joining using cloud metadata from various cloud providers
The "server_join" stanza specifies how the Nomad agent will discover and connect to Nomad servers.
---
# Cloud Auto-joining
# `server_join` Stanza
As of Nomad 0.8.4,
[`retry_join`](/docs/agent/configuration/server_join.html#retry_join) accepts a
unified interface using the
[go-discover](https://github.com/hashicorp/go-discover) library for doing
automatic cluster joining using cloud metadata. To use retry-join with a
supported cloud provider, specify the configuration on the command line or
configuration file as a `key=value key=value ...` string.
<table class="table table-bordered table-striped">
<tr>
<th width="120">Placement</th>
<td>
<code>server -> **server_join**</code>
<br>
<code>client -> **server_join**</code>
</td>
</tr>
</table>
Values are taken literally and must not be URL
encoded. If the values contain spaces, backslashes or double quotes then
they need to be double quoted and the usual escaping rules apply.
The `server_join` stanza specifies how the Nomad agent will discover and connect
to Nomad servers.
```json
{
"retry_join": ["provider=my-cloud config=val config2=\"some other val\" ..."]
```hcl
server_join {
retry_join = [ "1.1.1.1", "2.2.2.2" ]
retry_max = 3
retry_interval = "15s"
}
```
The cloud provider-specific configurations are detailed below. This can be
combined with static IP or DNS addresses or even multiple configurations
for different providers.
## `server_join` Parameters
In order to use discovery behind a proxy, you will need to set
`HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables per
[Golang `net/http` library](https://golang.org/pkg/net/http/#ProxyFromEnvironment).
- `retry_join` `(array<string>: [])` - Specifies a list of server addresses to
join. This is similar to [`start_join`](#start_join), but will continue to
be attempted even if the initial join attempt fails, up to
[retry_max](#retry_max). Further, `retry_join` is available to
both Nomad servers and clients, while `start_join` is only defined for Nomad
servers. This is useful for cases where we know the address will become
available eventually. Use `retry_join` with an array as a replacement for
`start_join`, **do not use both options**.
The following sections give the options specific to a subset of supported cloud
provider. For information on all providers, see further documentation in
[go-discover](https://github.com/hashicorp/go-discover).
Address format includes both using IP addresses as well as an interface to the
[go-discover](https://github.com/hashicorp/go-discover) library for doing
automated cluster joining using cloud metadata. See the [Cloud Auto-join](#cloud-auto-join)
section below for more information.
```
server_join {
retry_join = [ "1.1.1.1", "2.2.2.2" ]
}
```
Using the `go-discover` interface, this can be defined both in a client or
server configuration as well as provided as a command-line argument.
```
server_join {
retry_join = [ "provider=aws tag_key=..." ]
}
```
See the [server address format](#server-address-format) for more information
about expected server address formats.
- `retry_interval` `(string: "30s")` - Specifies the time to wait between retry
join attempts.
- `retry_max` `(int: 0)` - Specifies the maximum number of join attempts to be
made before exiting with a return code of 1. By default, this is set to 0
which is interpreted as infinite retries.
- `start_join` `(array<string>: [])` - Specifies a list of server addresses to
join on startup. If Nomad is unable to join with any of the specified
addresses, agent startup will fail. See the
[server address format](#server-address-format) section for more information
on the format of the string. This field is defined only for Nomad servers and
will result in a configuration parse error if included in a client
configuration.
## Server Address Format
This section describes the acceptable syntax and format for describing the
location of a Nomad server. There are many ways to reference a Nomad server,
including directly by IP address and resolving through DNS.
### Directly via IP Address
It is possible to address another Nomad server using its IP address. This is
done in the `ip:port` format, such as:
```
1.2.3.4:5678
```
If the port option is omitted, it defaults to the Serf port, which is 4648
unless configured otherwise:
```
1.2.3.4 => 1.2.3.4:4648
```
### Via Domains or DNS
It is possible to address another Nomad server using its DNS address. This is
done in the `address:port` format, such as:
```
nomad-01.company.local:5678
```
If the port option is omitted, it defaults to the Serf port, which is 4648
unless configured otherwise:
```
nomad-01.company.local => nomad-01.company.local:4648
```
### Via the go-discover interface
As of Nomad 0.8.4, `retry_join` accepts a unified interface using the
[go-discover](https://github.com/hashicorp/go-discover) library for doing
automated cluster joining using cloud metadata. See [Cloud
Auto-join][cloud_auto_join] for more information.
```
"provider=aws tag_key=..." => 1.2.3.4:4648
```
## Cloud Auto-join
The following sections describe the Cloud Auto-join `retry_join` options that are specific
to a subset of supported cloud providers. For information on all providers, see further
documentation in [go-discover](https://github.com/hashicorp/go-discover).
### Amazon EC2
@ -133,4 +229,3 @@ Discovery requires a [GCE Service
Account](https://cloud.google.com/compute/docs/access/service-accounts).
Credentials are searched using the following paths, in order of precedence.

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "telemetry Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-telemetry"
sidebar_current: "docs-configuration-telemetry"
description: |-
The "telemetry" stanza configures Nomad's publication of metrics and telemetry
to third-party systems.
@ -31,7 +31,7 @@ telemetry {
This section of the documentation only covers the configuration options for
`telemetry` stanza. To understand the architecture and metrics themselves,
please see the [Nomad telemetry documentation](/docs/agent/telemetry.html).
please see the [Telemetry guide](/guides/operations/monitoring/telemetry.html).
## `telemetry` Parameters

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "tls Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-tls"
sidebar_current: "docs-configuration-tls"
description: |-
The "tls" stanza configures Nomad's TLS communication via HTTP and RPC to
enforce secure cluster communication between servers, clients, and between.
@ -33,7 +33,7 @@ start the Nomad agent.
This section of the documentation only covers the configuration options for
`tls` stanza. To understand how to setup the certificates themselves, please see
the [Agent's Gossip and RPC Encryption](/docs/agent/encryption.html).
the [Encryption Overview Guide](/guides/security/encryption.html).
## `tls` Parameters

View File

@ -1,7 +1,7 @@
---
layout: "docs"
page_title: "vault Stanza - Agent Configuration"
sidebar_current: "docs-agent-configuration-vault"
sidebar_current: "docs-configuration-vault"
description: |-
The "vault" stanza configures Nomad's integration with HashiCorp's Vault.
When configured, Nomad can create and distribute Vault tokens to tasks
@ -86,8 +86,8 @@ vault {
- `token` `(string: "")` - Specifies the parent Vault token to use to derive child tokens for jobs
requesting tokens.
Visit the [Vault Integration](/docs/vault-integration/index.html)
documentation to see how to generate an appropriate token in Vault.
Visit the [Vault Integration Guide](/guides/operations/vault-integration/index.html)
to see how to generate an appropriate token in Vault.
!> It is **strongly discouraged** to place the token as a configuration
parameter like this, since the token could be checked into source control
@ -150,4 +150,4 @@ token needs to be given to the servers without having to restart them. A reload
can be accomplished by sending the process a `SIGHUP` signal.
[vault]: https://www.vaultproject.io/ "Vault by HashiCorp"
[nomad-vault]: /docs/vault-integration/index.html "Nomad Vault Integration"
[nomad-vault]: /guides/operations/vault-integration/index.html "Nomad Vault Integration"

View File

@ -583,7 +583,7 @@ of the Linux Kernel and Docker daemon.
## Client Configuration
The `docker` driver has the following [client configuration
options](/docs/agent/configuration/client.html#options):
options](/docs/configuration/client.html#options):
* `docker.endpoint` - If using a non-standard socket, HTTP or another location,
or if TLS is being used, `docker.endpoint` must be set. If unset, Nomad will

View File

@ -131,4 +131,4 @@ the client manages garbage collection locally which mitigates any issue this may
create.
This list is configurable through the agent client
[configuration file](/docs/agent/configuration/client.html#chroot_env).
[configuration file](/docs/configuration/client.html#chroot_env).

View File

@ -109,7 +109,7 @@ The `lxc` driver requires the following:
## Client Configuration
* `lxc.enable` - The `lxc` driver may be disabled on hosts by setting this
[client configuration][/docs/agent/configuration/client.html##options-parameters]
[client configuration](/docs/configuration/client.html#options-parameters)
option to `false` (defaults to `true`).
## Client Attributes

View File

@ -58,9 +58,9 @@ The `qemu` driver supports the following configuration in the job spec:
the monitor socket path is limited to 108 characters. Graceful shutdown will
be disabled if qemu is < 2.10.1 and the generated monitor path exceeds this
length. You may encounter this issue if you set long
[data_dir](https://www.nomadproject.io/docs/agent/configuration/index.html#data_dir)
[data_dir](/docs/configuration/index.html#data_dir)
or
[alloc_dir](https://www.nomadproject.io/docs/agent/configuration/client.html#alloc_dir)
[alloc_dir](/docs/configuration/client.html#alloc_dir)
paths.) This feature is currently not supported on Windows.
* `port_map` - (Optional) A key-value map of port labels.

View File

@ -79,7 +79,7 @@ task "example" {
The `raw_exec` driver can run on all supported operating systems. For security
reasons, it is disabled by default. To enable raw exec, the Nomad client
configuration must explicitly enable the `raw_exec` driver in the client's
[options](/docs/agent/configuration/client.html#options):
[options](/docs/configuration/client.html#options):
```
client {

View File

@ -167,7 +167,7 @@ over HTTP.
## Client Configuration
The `rkt` driver has the following [client configuration
options](/docs/agent/configuration/client.html#options):
options](/docs/configuration/client.html#options):
* `rkt.volumes.enabled`: Defaults to `true`. Allows tasks to bind host paths
(`volumes`) inside their container. Binding relative paths is always allowed

View File

@ -10,7 +10,7 @@ description: |-
# Nomad Enterprise Advanced Autopilot
Nomad Enterprise supports Advanced Autopilot capabilities which enable fully
[Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise) supports Advanced Autopilot capabilities which enable fully
automated server upgrades, higher throughput for reads and scheduling, and hot
server failover on a per availability zone basis. See the sections below for
additional details on each of these capabilities.
@ -38,5 +38,8 @@ completely lost, only one voter will be lost, so the cluster remains available.
If a voter is lost in an availability zone, Autopilot will promote the non-voter
to voter automatically, putting the hot standby server into service quickly.
See the [Nomad Autopilot Guide](/guides/autopilot.html)
See the [Nomad Autopilot Guide](/guides/operations/autopilot.html)
for a comprehensive overview of Nomad's open source and enterprise Autopilot features.
Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

View File

@ -9,7 +9,7 @@ description: |-
# Nomad Enterprise
[Nomad Enterprise](https://www.hashicorp.com/products/nomad/) adds collaboration,
[Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise) adds collaboration,
operational, and governance capabilities to Nomad. Namespaces allow multiple
teams to safely use a shared multi-region deployment. With Resource Quotas,
operators can limit resource consumption across teams or projects. Sentinel
@ -23,4 +23,7 @@ links below for a detailed overview of each feature.
- [Sentinel Policies](/docs/enterprise/sentinel/index.html)
- [Advanced Autopilot](/docs/enterprise/autopilot/index.html)
These features are part of [Nomad Enterprise](https://www.hashicorp.com/products/nomad/).
Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or request a trial
of Nomad Enterprise.

View File

@ -10,15 +10,17 @@ description: |-
# Nomad Enterprise Namespaces
In [Nomad Enterprise](https://www.hashicorp.com/products/nomad/), a shared
cluster can be partitioned into [namespaces](/guides/namespaces.html) which allows
In [Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise), a shared
cluster can be partitioned into [namespaces](/guides/security/namespaces.html) which allows
jobs and their associated objects to be isolated from each other and other users
of the cluster.
Namespaces enhance the usability of a shared cluster by isolating teams from the
jobs of others, provide fine grain access control to jobs when coupled with
[ACLs](/guides/acl.html), and can prevent bad actors from negatively impacting
the whole cluster when used in conjunction with
[resource quotas](/docs/enterprise/quotas/index.html).
[ACLs](/guides/security/acl.html), and can prevent bad actors from negatively impacting
the whole cluster when used in conjunction with
[resource quotas](/guides/security/quotas.html). See the
[Namespaces Guide](/guides/security/namespaces.html) for a thorough overview.
See the [Namespaces Guide](/guides/namespaces.html) for a thorough overview.
Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

View File

@ -10,12 +10,14 @@ description: |-
# Nomad Enterprise Resource Quotas
In [Nomad Enterprise](https://www.hashicorp.com/products/nomad/), operators can
define [quota specifications](/guides/quotas.html) and apply them to namespaces.
In [Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise), operators can
define [quota specifications](/guides/security/quotas.html) and apply them to namespaces.
When a quota is attached to a namespace, the jobs within the namespace may not
consume more resources than the quota specification allows.
This allows operators to partition a shared cluster and ensure that no single
actor can consume the whole resources of the cluster.
actor can consume the whole resources of the cluster. See the
[Resource Quotas Guide](/guides/security/quotas.html) for more details.
See the [Resource Quotas Guide](/guides/quotas.html) for more details.
Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

View File

@ -8,8 +8,8 @@ description: |-
# Nomad Enterprise Sentinel Policy Enforcement
In [Nomad Enterprise](https://www.hashicorp.com/products/nomad/), operators can
create [Sentinel policies](/guides/sentinel-policy.html) for fine-grained policy
In [Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise), operators can
create [Sentinel policies](/guides/security/sentinel-policy.html) for fine-grained policy
enforcement. Sentinel policies build on top of the ACL system and allow operators to define
policies such as disallowing jobs to be submitted to production on
Fridays. These extremely rich policies are defined as code. For example, to
@ -30,4 +30,7 @@ all_drivers_docker = rule {
}
```
See the [Sentinel Policies Guide](/guides/sentinel-policy.html) for additional details and examples.
See the [Sentinel Policies Guide](/guides/security/sentinel-policy.html) for additional details and examples.
Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

View File

@ -16,8 +16,8 @@ Only anonymous information, which cannot be used to identify the user or host, i
sent to Checkpoint. An anonymous ID is sent which helps de-duplicate warning messages.
This anonymous ID can be disabled. Using the Checkpoint service is optional and can be disabled.
See [`disable_anonymous_signature`](/docs/agent/configuration/index.html#disable_anonymous_signature)
and [`disable_update_check`](/docs/agent/configuration/index.html#disable_update_check).
See [`disable_anonymous_signature`](/docs/configuration/index.html#disable_anonymous_signature)
and [`disable_update_check`](/docs/configuration/index.html#disable_update_check).
## Q: Is Nomad eventually or strongly consistent?
@ -40,4 +40,4 @@ clusters][consul_fed].
[consul_dc]: https://www.consul.io/docs/agent/options.html#_datacenter
[consul_fed]: https://www.consul.io/docs/guides/datacenters.html
[nomad_region]: /docs/agent/configuration/index.html#datacenter
[nomad_region]: /docs/configuration/index.html#datacenter

View File

@ -275,7 +275,7 @@ constraint {
[job]: /docs/job-specification/job.html "Nomad job Job Specification"
[group]: /docs/job-specification/group.html "Nomad group Job Specification"
[client-meta]: /docs/agent/configuration/client.html#meta "Nomad meta Job Specification"
[client-meta]: /docs/configuration/client.html#meta "Nomad meta Job Specification"
[task]: /docs/job-specification/task.html "Nomad task Job Specification"
[interpolation]: /docs/runtime/interpolation.html "Nomad interpolation"
[node-variables]: /docs/runtime/interpolation.html#node-variables- "Nomad interpolation-Node variables"

View File

@ -223,4 +223,4 @@ $ VAULT_TOKEN="..." nomad job run example.nomad
[task]: /docs/job-specification/task.html "Nomad task Job Specification"
[update]: /docs/job-specification/update.html "Nomad update Job Specification"
[vault]: /docs/job-specification/vault.html "Nomad vault Job Specification"
[scheduler]: /docs/runtime/schedulers.html "Nomad Scheduler Types"
[scheduler]: /docs/schedulers.html "Nomad Scheduler Types"

View File

@ -48,7 +48,7 @@ stanza for allocations on that node. The `migrate` stanza is for job authors to
define how their services should be migrated, while the node drain deadline is
for system operators to put hard limits on how long a drain may take.
See the [Decommissioning Nodes guide](/guides/node-draining.html) for details
See the [Workload Migration Guide](/guides/operations/node-draining.html) for details
on node draining.
## `migrate` Parameters

View File

@ -157,7 +157,7 @@ job "email-blast" {
```
[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type"
[dispatch command]: /docs/commands/job-dispatch.html "Nomad Job Dispatch Command"
[dispatch command]: /docs/commands/job/dispatch.html "Nomad Job Dispatch Command"
[resources]: /docs/job-specification/resources.html "Nomad resources Job Specification"
[interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation"
[dispatch_payload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification"

View File

@ -622,7 +622,7 @@ system of a task for that driver.</small>
[check_restart_stanza]: /docs/job-specification/check_restart.html "check_restart stanza"
[consul_grpc]: https://www.consul.io/api/agent/check.html#grpc
[service-discovery]: /docs/service-discovery/index.html "Nomad Service Discovery"
[service-discovery]: /guides/operations/consul-integration/index.html#service-discovery "Nomad Service Discovery"
[interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation"
[network]: /docs/job-specification/network.html "Nomad network Job Specification"
[qemu]: /docs/drivers/qemu.html "Nomad qemu Driver"

View File

@ -195,12 +195,12 @@ task "server" {
[meta]: /docs/job-specification/meta.html "Nomad meta Job Specification"
[resources]: /docs/job-specification/resources.html "Nomad resources Job Specification"
[logs]: /docs/job-specification/logs.html "Nomad logs Job Specification"
[service]: /docs/service-discovery/index.html "Nomad Service Discovery"
[service]: /guides/operations/consul-integration/index.html#service-discovery "Nomad Service Discovery"
[exec]: /docs/drivers/exec.html "Nomad exec Driver"
[java]: /docs/drivers/java.html "Nomad Java Driver"
[Docker]: /docs/drivers/docker.html "Nomad Docker Driver"
[rkt]: /docs/drivers/rkt.html "Nomad rkt Driver"
[template]: /docs/job-specification/template.html "Nomad template Job Specification"
[user_drivers]: /docs/agent/configuration/client.html#_quot_user_checked_drivers_quot_
[user_blacklist]: /docs/agent/configuration/client.html#_quot_user_blacklist_quot_
[max_kill]: /docs/agent/configuration/client.html#max_kill_timeout
[user_drivers]: /docs/configuration/client.html#_quot_user_checked_drivers_quot_
[user_blacklist]: /docs/configuration/client.html#_quot_user_blacklist_quot_
[max_kill]: /docs/configuration/client.html#max_kill_timeout

View File

@ -278,7 +278,7 @@ rather than `secret/...`.
## Client Configuration
The `template` block has the following [client configuration
options](/docs/agent/configuration/client.html#options):
options](/docs/configuration/client.html#options):
* `template.allow_host_source` - Allows templates to specify their source
template as an absolute path referencing host directories. Defaults to `true`.

View File

@ -73,7 +73,7 @@
</tr>
<tr>
<td><tt>VAULT&lowbar;TOKEN</tt></td>
<td>The task's Vault token. See [Vault Integration](/docs/vault-integration/index.html) for more details</td>
<td>The task's Vault token. See [Vault Integration](/guides/operations/vault-integration/index.html) for more details</td>
</tr>
<tr><th colspan="2">Network-related Variables</th></tr>
<tr>

View File

@ -98,4 +98,4 @@ multiple keys with the same uppercased representation will lead to undefined
behavior.
[jobspec]: /docs/job-specification/index.html "Nomad Job Specification"
[vault]: /docs/vault-integration/index.html "Nomad Vault Integration"
[vault]: /guides/operations/vault-integration/index.html "Nomad Vault Integration"

View File

@ -1,12 +1,12 @@
---
layout: "docs"
page_title: "Interpolation - Runtime"
page_title: "Variable Interpolation"
sidebar_current: "docs-runtime-interpolation"
description: |-
Learn about the Nomad's interpolation and interpreted variables.
---
# Interpolation
# Variable Interpolation
Nomad supports interpreting two classes of variables, node attributes and
runtime environment variables. Node attributes are interpretable in constraints,

View File

@ -1,12 +1,12 @@
---
layout: "docs"
page_title: "Scheduler Types - Runtime"
sidebar_current: "docs-runtime-schedulers"
page_title: "Schedulers"
sidebar_current: "docs-schedulers"
description: |-
Learn about Nomad's various schedulers.
---
# Scheduler Types
# Schedulers
Nomad has three scheduler types that can be used when creating your job:
`service`, `batch` and `system`. Here we will describe the differences between

View File

@ -1,54 +0,0 @@
---
layout: "docs"
page_title: "Service Discovery"
sidebar_current: "docs-service-discovery"
description: |-
Learn how to add service discovery to jobs
---
# Service Discovery
Nomad schedules workloads of various types across a cluster of generic hosts.
Because of this, placement is not known in advance and you will need to use
service discovery to connect tasks to other services deployed across your
cluster. Nomad integrates with [Consul][] to provide service discovery and
monitoring.
Note that in order to use Consul with Nomad, you will need to configure and
install Consul on your nodes alongside Nomad, or schedule it as a system job.
Nomad does not currently run Consul for you.
## Configuration
To enable Consul integration, please see the
[Nomad agent Consul integration](/docs/agent/configuration/consul.html)
configuration.
## Service Definition Syntax
To configure a job to register with service discovery, please see the
[`service` job specification documentation][service].
## Assumptions
- Consul 0.7.2 or later is needed for `tls_skip_verify` in HTTP checks.
- Consul 0.6.4 or later is needed for using the Script checks.
- Consul 0.6.0 or later is needed for using the TCP checks.
- The service discovery feature in Nomad depends on operators making sure that
the Nomad client can reach the Consul agent.
- Tasks running inside Nomad also need to reach out to the Consul agent if
they want to use any of the Consul APIs. Ex: A task running inside a docker
container in the bridge mode won't be able to talk to a Consul Agent running
on the loopback interface of the host since the container in the bridge mode
has its own network interface and doesn't see interfaces on the global
network namespace of the host. There are a couple of ways to solve this, one
way is to run the container in the host networking mode, or make the Consul
agent listen on an interface in the network namespace of the container.
[consul]: https://www.consul.io/ "Consul by HashiCorp"
[service]: /docs/job-specification/service.html "Nomad service Job Specification"

View File

@ -0,0 +1,15 @@
---
layout: "guides"
page_title: "Getting Started"
sidebar_current: "guides-getting-started"
description: |-
This section takes you to the Getting Started section.
---
# Nomad Getting Started
Welcome to the Nomad guides section! If you are just getting started with
Nomad, please start with the [Nomad introduction](/intro/getting-started/install.html) instead and then continue on to the guides. The guides provide examples of
common Nomad workflows and actions for developers, operators, and security teams.

View File

@ -1,12 +1,12 @@
---
layout: "guides"
page_title: "Operating a Job"
page_title: "Job Lifecycle"
sidebar_current: "guides-operating-a-job"
description: |-
Learn how to operate a Nomad Job.
Learn how to deploy and manage a Nomad Job.
---
# Operating a Job
# Job Lifecycle
The general flow for operating a job in Nomad is:

View File

@ -83,8 +83,7 @@ While single point in time resource usage measurements are useful, it is often
more useful to graph resource usage over time to better understand and estimate
resource usage. Nomad supports outputting resource data to statsite and statsd
and is the recommended way of monitoring resources. For more information about
outputting telemetry see the [telemetry
documentation](/docs/agent/telemetry.html).
outputting telemetry see the [Telemetry Guide](/guides/operations/monitoring/telemetry.html).
For more advanced use cases, the resource usage data is also accessible via the
client's HTTP API. See the documentation of the Client's [allocation HTTP

View File

@ -1,7 +1,7 @@
---
layout: "docs"
layout: "guides"
page_title: "Nomad Agent"
sidebar_current: "docs-agent"
sidebar_current: "guides-operations-agent"
description: |-
The Nomad agent is a long running process which can be used either in
a client or server mode.

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Autopilot"
sidebar_current: "guides-autopilot"
sidebar_current: "guides-operations-autopilot"
description: |-
This guide covers how to configure and use Autopilot features.
---
@ -13,15 +13,15 @@ operator-friendly management of Nomad servers. It includes cleanup of dead
servers, monitoring the state of the Raft cluster, and stable server introduction.
To enable Autopilot features (with the exception of dead server cleanup),
the `raft_protocol` setting in the [server stanza](/docs/agent/configuration/server.html)
the `raft_protocol` setting in the [server stanza](/docs/configuration/server.html)
must be set to 3 on all servers. In Nomad 0.8 this setting defaults to 2; in Nomad 0.9 it will default to 3.
For more information, see the [Version Upgrade section](/docs/upgrade/upgrade-specific.html#raft-protocol-version-compatibility)
For more information, see the [Version Upgrade section](/guides/operations/upgrade/upgrade-specific.html#raft-protocol-version-compatibility)
on Raft Protocol versions.
## Configuration
The configuration of Autopilot is loaded by the leader from the agent's
[Autopilot settings](/docs/agent/configuration/autopilot.html) when initially
[Autopilot settings](/docs/configuration/autopilot.html) when initially
bootstrapping the cluster:
```
@ -149,7 +149,7 @@ setting.
## Server Read and Scheduling Scaling
With the [`non_voting_server`](/docs/agent/configuration/server.html#non_voting_server) option, a
With the [`non_voting_server`](/docs/configuration/server.html#non_voting_server) option, a
server can be explicitly marked as a non-voter and will never be promoted to a voting
member. This can be useful when more read scaling is needed; being a non-voter means
that the server will still have data replicated to it, but it will not be part of the
@ -164,7 +164,7 @@ have an overly-large quorum (2-3 nodes per AZ) or give up redundancy within an A
deploying just one server in each.
If the `EnableRedundancyZones` setting is set, Nomad will use its value to look for a
zone in each server's specified [`redundancy_zone`](/docs/agent/configuration/server.html#redundancy_zone)
zone in each server's specified [`redundancy_zone`](/docs/configuration/server.html#redundancy_zone)
field.
Here's an example showing how to configure this:
@ -216,6 +216,6 @@ a migration, so that the migration logic can be used for updating the cluster wh
changing configuration.
If the `EnableCustomUpgrades` setting is set to `true`, Nomad will use its value to look for a
version in each server's specified [`upgrade_version`](/docs/agent/configuration/server.html#upgrade_version)
version in each server's specified [`upgrade_version`](/docs/configuration/server.html#upgrade_version)
tag. The upgrade logic will follow semantic versioning and the `upgrade_version`
must be in the form of either `X`, `X.Y`, or `X.Y.Z`.

View File

@ -1,14 +1,14 @@
---
layout: "guides"
page_title: "Automatically Bootstrapping a Nomad Cluster"
sidebar_current: "guides-cluster-automatic"
page_title: "Automatic Clustering with Consul"
sidebar_current: "guides-operations-cluster-automatic"
description: |-
Learn how to automatically bootstrap a Nomad cluster using Consul. By having
a Consul agent installed on each host, Nomad can automatically discover other
clients and servers to bootstrap the cluster without operator involvement.
---
# Automatic Bootstrapping
# Automatic Clustering with Consul
To automatically bootstrap a Nomad cluster, we must leverage another HashiCorp
open source tool, [Consul](https://www.consul.io/). Bootstrapping Nomad is
@ -115,5 +115,5 @@ consul {
```
Please refer to the [Consul
documentation](/docs/agent/configuration/consul.html) for the complete set of
documentation](/docs/configuration/consul.html) for the complete set of
configuration options.

View File

@ -1,12 +1,12 @@
---
layout: "guides"
page_title: "Bootstrapping a Nomad Cluster"
sidebar_current: "guides-cluster-bootstrap"
page_title: "Clustering"
sidebar_current: "guides-operations-cluster"
description: |-
Learn how to bootstrap a Nomad cluster.
Learn how to cluster Nomad.
---
# Bootstrapping a Nomad Cluster
# Clustering
Nomad models infrastructure into regions and datacenters. Servers reside at the
regional layer and manage all state and scheduling decisions for that region.
@ -15,10 +15,12 @@ datacenter (and thus a region that contains that datacenter). For more details o
the architecture of Nomad and how it models infrastructure see the [architecture
page](/docs/internals/architecture.html).
There are two strategies for bootstrapping a Nomad cluster:
There are multiple strategies available for creating a multi-node Nomad cluster:
1. <a href="/guides/operations/cluster/manual.html">Manual Clustering</a>
1. <a href="/guides/operations/cluster/automatic.html">Automatic Clustering with Consul</a>
1. <a href="/guides/operations/cluster/cloud_auto_join.html">Cloud Auto-join</a>
1. <a href="/guides/cluster/automatic.html">Automatic bootstrapping</a>
1. <a href="/guides/cluster/manual.html">Manual bootstrapping</a>
Please refer to the specific documentation links above or in the sidebar for
more detailed information about each strategy.

View File

@ -0,0 +1,37 @@
---
layout: "guides"
page_title: "Cloud Auto-join"
sidebar_current: "guides-operations-cluster-cloud-auto-join"
description: |-
Nomad supports automatic cluster joining using cloud metadata from various
cloud providers
---
# Cloud Auto-joining
As of Nomad 0.8.4,
[`retry_join`](/docs/configuration/server_join.html#retry_join) accepts a
unified interface using the
[go-discover](https://github.com/hashicorp/go-discover) library for doing
automatic cluster joining using cloud metadata. To use retry-join with a
supported cloud provider, specify the configuration on the command line or
configuration file as a `key=value key=value ...` string. Values are taken
literally and must not be URL encoded. If the values contain spaces, backslashes
or double quotes then they need to be double quoted and the usual escaping rules
apply.
```json
{
"retry_join": ["provider=my-cloud config=val config2=\"some other val\" ..."]
}
```
The cloud provider-specific configurations are documented [here](/docs/configuration/server_join.html#cloud-auto-join).
This can be combined with static IP or DNS addresses or even multiple configurations
for different providers. In order to use discovery behind a proxy, you will need to set
`HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables per
[Golang `net/http` library](https://golang.org/pkg/net/http/#ProxyFromEnvironment).

View File

@ -1,14 +1,14 @@
---
layout: "guides"
page_title: "Manually Bootstrapping a Nomad Cluster"
sidebar_current: "guides-cluster-manual"
page_title: "Manual Clustering"
sidebar_current: "guides-operations-cluster-manual"
description: |-
Learn how to manually bootstrap a Nomad cluster using the server join
command. This section also discusses Nomad federation across multiple
datacenters and regions.
---
# Manual Bootstrapping
# Manual Clustering
Manually bootstrapping a Nomad cluster does not rely on additional tooling, but
does require operator participation in the cluster formation process. When

View File

@ -0,0 +1,77 @@
---
layout: "guides"
page_title: "Consul Integration"
sidebar_current: "guides-operations-consul-integration"
description: |-
Learn how to integrate Nomad with Consul and add service discovery to jobs
---
# Consul Integration
[Consul][] is a tool for discovering and configuring services in your
infrastructure. Consul's key features include service discovery, health checking,
a KV store, and robust support for multi-datacenter deployments. Nomad's integration
with Consul enables automatic clustering, built-in service registration, and
dynamic rendering of configuration files and environment variables. The sections
below describe the integration in more detail.
## Configuration
In order to use Consul with Nomad, you will need to configure and
install Consul on your nodes alongside Nomad, or schedule it as a system job.
Nomad does not currently run Consul for you.
To enable Consul integration, please see the
[Nomad agent Consul integration](/docs/configuration/consul.html)
configuration.
## Automatic Clustering with Consul
Nomad servers and clients will be automatically informed of each other's
existence when a running Consul cluster already exists and the Consul agent is
installed and configured on each host. Please see the [Automatic Clustering with
Consul](/guides/operations/cluster/automatic.html) guide for more information.
## Service Discovery
Nomad schedules workloads of various types across a cluster of generic hosts.
Because of this, placement is not known in advance and you will need to use
service discovery to connect tasks to other services deployed across your
cluster. Nomad integrates with Consul to provide service discovery and
monitoring.
To configure a job to register with service discovery, please see the
[`service` job specification documentation][service].
## Dynamic Configuration
Nomad's job specification includes a [`template` stanza](/docs/job-specification/template.html)
that utilizes a Consul ecosystem tool called [Consul Template](https://github.com/hashicorp/consul-template). This mechanism creates a convenient way to ship configuration files
that are populated from environment variables, Consul data, Vault secrets, or just
general configurations within a Nomad task.
For more information on Nomad's template stanza and how it leverages Consul Template,
please see the [`template` job specification documentation](/docs/job-specification/template.html).
## Assumptions
- Consul 0.7.2 or later is needed for `tls_skip_verify` in HTTP checks.
- Consul 0.6.4 or later is needed for using the Script checks.
- Consul 0.6.0 or later is needed for using the TCP checks.
- The service discovery feature in Nomad depends on operators making sure that
the Nomad client can reach the Consul agent.
- Tasks running inside Nomad also need to reach out to the Consul agent if
they want to use any of the Consul APIs. Ex: A task running inside a docker
container in the bridge mode won't be able to talk to a Consul Agent running
on the loopback interface of the host since the container in the bridge mode
has its own network interface and doesn't see interfaces on the global
network namespace of the host. There are a couple of ways to solve this, one
way is to run the container in the host networking mode, or make the Consul
agent listen on an interface in the network namespace of the container.
[consul]: https://www.consul.io/ "Consul by HashiCorp"
[service]: /docs/job-specification/service.html "Nomad service Job Specification"

View File

@ -1,13 +1,13 @@
---
layout: "guides"
page_title: "Federating a Nomad Cluster"
sidebar_current: "guides-cluster-federation"
page_title: "Multi-region Federation"
sidebar_current: "guides-operations-federation"
description: |-
Learn how to join Nomad servers across multiple regions so users can submit
jobs to any server in any region using global federation.
---
# Federating a Cluster
# Multi-region Federation
Because Nomad operates at a regional level, federation is part of Nomad core.
Federation enables users to submit jobs or interact with the HTTP API targeting
@ -33,4 +33,4 @@ enough to join just one known server.
If bootstrapped via Consul and the Consul clusters in the Nomad regions are
federated, then federation occurs automatically.
[ports]: /guides/cluster/requirements.html#ports-used
[ports]: /guides/operations/requirements.html#ports-used

View File

@ -0,0 +1,13 @@
---
layout: "guides"
page_title: "Nomad Operations"
sidebar_current: "guides-operations"
description: |-
Learn how to operate Nomad.
---
# Nomad Operations
The Nomad Operations guides section provides best practices and guidance for
operating Nomad in a real-world production setting. Please navigate the
appropriate sub-sections for more information.

View File

@ -1,7 +1,7 @@
---
layout: "docs"
layout: "guides"
page_title: "Installing Nomad"
sidebar_current: "docs-installing"
sidebar_current: "guides-operations-installing"
description: |-
Learn how to install Nomad.
---

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Setting up Nomad with Grafana and Prometheus Metrics"
sidebar_current: "guides-nomad-metrics"
sidebar_current: "guides-operations-monitoring-grafana"
description: |-
It is possible to collect metrics on Nomad and create dashboards with Grafana
and Prometheus. Nomad has default configurations for these, but it is

View File

@ -1,7 +1,7 @@
---
layout: "docs"
layout: "guides"
page_title: "Telemetry"
sidebar_current: "docs-agent-telemetry"
sidebar_current: "guides-operations-monitoring-telemetry"
description: |-
Learn about the telemetry data available in Nomad.
---
@ -30,7 +30,7 @@ Telemetry information can be streamed to both [statsite](https://github.com/armo
as well as statsd based on providing the appropriate configuration options.
To configure the telemetry output please see the [agent
configuration](/docs/agent/configuration/telemetry.html).
configuration](/docs/configuration/telemetry.html).
Below is sample output of a telemetry dump:
@ -233,7 +233,7 @@ By default the collection interval is 1 second but it can be changed by the
changing the value of the `collection_interval` key in the `telemetry`
configuration block.
Please see the [agent configuration](/docs/agent/configuration/telemetry.html)
Please see the [agent configuration](/docs/configuration/telemetry.html)
page for more details.
As of Nomad 0.9, Nomad will emit additional labels for [parameterized](/docs/job-specification/parameterized.html) and

View File

@ -1,20 +1,20 @@
---
layout: "guides"
page_title: "Decommissioning Nodes"
sidebar_current: "guides-decommissioning-nodes"
page_title: "Workload Migration"
sidebar_current: "guides-operations-decommissioning-nodes"
description: |-
Decommissioning nodes is a normal part of cluster operations for a variety of
Workload migration is a normal part of cluster operations for a variety of
reasons: server maintenance, operating system upgrades, etc. Nomad offers a
number of parameters for controlling how running jobs are migrated off of
draining nodes.
---
# Decommissioning Nomad Client Nodes
# Workload Migration
Decommissioning nodes is a normal part of cluster operations for a variety of
reasons: server maintenance, operating system upgrades, etc. Nomad offers a
number of parameters for controlling how running jobs are migrated off of
draining nodes.
Migrating workloads and decommissioning nodes are a normal part of cluster
operations for a variety of reasons: server maintenance, operating system
upgrades, etc. Nomad offers a number of parameters for controlling how running
jobs are migrated off of draining nodes.
## Configuring How Jobs are Migrated

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Outage Recovery"
sidebar_current: "guides-outage-recovery"
sidebar_current: "guides-operations-outage-recovery"
description: |-
Don't panic! This is a critical first step. Depending on your deployment
configuration, it may take only a single server failure for cluster
@ -20,15 +20,15 @@ requires an operator to intervene, but the process is straightforward.
~> This guide is for recovery from a Nomad outage due to a majority of server
nodes in a datacenter being lost. If you are looking to add or remove servers,
see the [bootstrapping guide](/guides/cluster/bootstrapping.html).
see the [bootstrapping guide](/guides/operations/cluster/bootstrapping.html).
## Failure of a Single Server Cluster
If you had only a single server and it has failed, simply restart it. A
single server configuration requires the
[`-bootstrap-expect=1`](/docs/agent/configuration/server.html#bootstrap_expect)
[`-bootstrap-expect=1`](/docs/configuration/server.html#bootstrap_expect)
flag. If the server cannot be recovered, you need to bring up a new
server. See the [bootstrapping guide](/guides/cluster/bootstrapping.html)
server. See the [bootstrapping guide](/guides/operations/cluster/bootstrapping.html)
for more detail.
In the case of an unrecoverable server failure in a single server cluster, data
@ -126,7 +126,7 @@ any automated processes that will put the peers file in place on a
periodic basis.
The next step is to go to the
[`-data-dir`](/docs/agent/configuration/index.html#data_dir) of each Nomad
[`-data-dir`](/docs/configuration/index.html#data_dir) of each Nomad
server. Inside that directory, there will be a `raft/` sub-directory. We need to
create a `raft/peers.json` file. It should look something like:
@ -220,5 +220,5 @@ Nomad server in the cluster, like this:
server's RPC port used for cluster communications.
- `non_voter` `(bool: <false>)` - This controls whether the server is a non-voter, which is used
in some advanced [Autopilot](/guides/autopilot.html) configurations. If omitted, it will
in some advanced [Autopilot](/guides/operations/autopilot.html) configurations. If omitted, it will
default to false, which is typical for most clusters.

View File

@ -1,13 +1,13 @@
---
layout: "guides"
page_title: "Nomad Client and Server Requirements"
sidebar_current: "guides-cluster-requirements"
page_title: "Hardware Requirements"
sidebar_current: "guides-operations-requirements"
description: |-
Learn about Nomad client and server requirements such as memory and CPU
recommendations, network topologies, and more.
---
# Cluster Requirements
# Hardware Requirements
## Resources (RAM, CPU, etc.)
@ -29,7 +29,7 @@ used by Nomad. This should be used to target a specific resource utilization per
node and to reserve resources for applications running outside of Nomad's
supervision such as Consul and the operating system itself.
Please see the [reservation configuration](/docs/agent/configuration/client.html#reserved) for
Please see the [reservation configuration](/docs/configuration/client.html#reserved) for
more detail.
## Network Topology

View File

@ -1,12 +1,12 @@
---
layout: "docs"
layout: "guides"
page_title: "Upgrading"
sidebar_current: "docs-upgrade-upgrading"
sidebar_current: "guides-operations-upgrade"
description: |-
Learn how to upgrade Nomad.
---
# Upgrading Nomad
# Upgrading
This page documents how to upgrade Nomad when a new version is released.
@ -23,7 +23,7 @@ For upgrades we strive to ensure backwards compatibility. For most upgrades, the
process is as simple as upgrading the binary and restarting the service.
Prior to starting the upgrade please check the
[specific version details](/docs/upgrade/upgrade-specific.html) page as some
[specific version details](/guides/operations/upgrade/upgrade-specific.html) page as some
version differences may require specific steps.
At a high level we complete the following steps to upgrade Nomad:
@ -102,8 +102,8 @@ Use the same actions in step #2 above to confirm cluster health.
Following the successful upgrade of the servers you can now update your
clients using a similar process as the servers. You may either upgrade clients
in-place or start new nodes on the new version. See the [Decommissioning Nodes
guide](/guides/node-draining.html) for instructions on how to migrate running
in-place or start new nodes on the new version. See the [Workload Migration
Guide](/guides/operations/node-draining.html) for instructions on how to migrate running
allocations from the old nodes to the new nodes with the [`nomad node
drain`](/docs/commands/node/drain.html) command.
@ -118,5 +118,5 @@ are in a `ready` state.
The process of upgrading to a Nomad Enterprise version is identical to upgrading
between versions of open source Nomad. The same guidance above should be
followed and as always, prior to starting the upgrade please check the [specific
version details](/docs/upgrade/upgrade-specific.html) page as some version
version details](/guides/operations/upgrade/upgrade-specific.html) page as some version
differences may require specific steps.

View File

@ -1,15 +1,15 @@
---
layout: "docs"
layout: "guides"
page_title: "Upgrade Guides"
sidebar_current: "docs-upgrade-specific"
sidebar_current: "guides-operations-upgrade-specific"
description: |-
Specific versions of Nomad may have additional information about the upgrade
process beyond the standard flow.
---
# Upgrading Specific Versions
# Upgrade Guides
The [upgrading page](/docs/upgrade/index.html) covers the details of doing
The [upgrading page](/guides/operations/upgrade/index.html) covers the details of doing
a standard upgrade. However, specific versions of Nomad may have more
details provided for their upgrades as a result of new features or changed
behavior. This page is used to document those details separately from the
@ -21,7 +21,7 @@ standard upgrade flow.
When upgrading to Nomad 0.8.0 from a version lower than 0.7.0, users will need
to set the
[`raft_protocol`](/docs/agent/configuration/server.html#raft_protocol) option
[`raft_protocol`](/docs/configuration/server.html#raft_protocol) option
in their `server` stanza to 1 in order to maintain backwards compatibility with
the old servers during the upgrade. After the servers have been migrated to
version 0.8.0, `raft_protocol` can be moved up to 2 and the servers restarted
@ -50,18 +50,18 @@ Raft Protocol versions supported by each Nomad version:
</tr>
</table>
In order to enable all [Autopilot](/guides/autopilot.html) features, all servers
In order to enable all [Autopilot](/guides/operations/autopilot.html) features, all servers
in a Nomad cluster must be running with Raft protocol version 3 or later.
#### Upgrading to Raft Protocol 3
This section provides details on upgrading to Raft Protocol 3 in Nomad 0.8 and higher. Raft protocol version 3 requires Nomad running 0.8.0 or newer on all servers in order to work. See [Raft Protocol Version Compatibility](/docs/upgrade/upgrade-specific.html#raft-protocol-version-compatibility) for more details. Also the format of `peers.json` used for outage recovery is different when running with the latest Raft protocol. See [Manual Recovery Using peers.json](/guides/outage.html#manual-recovery-using-peers-json) for a description of the required format.
This section provides details on upgrading to Raft Protocol 3 in Nomad 0.8 and higher. Raft protocol version 3 requires Nomad running 0.8.0 or newer on all servers in order to work. See [Raft Protocol Version Compatibility](/guides/operations/upgrade/upgrade-specific.html#raft-protocol-version-compatibility) for more details. Also the format of `peers.json` used for outage recovery is different when running with the latest Raft protocol. See [Manual Recovery Using peers.json](/guides/operations/outage.html#manual-recovery-using-peers-json) for a description of the required format.
Please note that the Raft protocol is different from Nomad's internal protocol as shown in commands like `nomad server members`. To see the version of the Raft protocol in use on each server, use the `nomad operator raft list-peers` command.
The easiest way to upgrade servers is to have each server leave the cluster, upgrade its `raft_protocol` version in the `server` stanza, and then add it back. Make sure the new server joins successfully and that the cluster is stable before rolling the upgrade forward to the next server. It's also possible to stand up a new set of servers, and then slowly stand down each of the older servers in a similar fashion.
When using Raft protocol version 3, servers are identified by their `node-id` instead of their IP address when Nomad makes changes to its internal Raft quorum configuration. This means that once a cluster has been upgraded with servers all running Raft protocol version 3, it will no longer allow servers running any older Raft protocol versions to be added. If running a single Nomad server, restarting it in-place will result in that server not being able to elect itself as a leader. To avoid this, either set the Raft protocol back to 2, or use [Manual Recovery Using peers.json](/guides/outage.html#manual-recovery-using-peers-json) to map the server to its node ID in the Raft quorum configuration.
When using Raft protocol version 3, servers are identified by their `node-id` instead of their IP address when Nomad makes changes to its internal Raft quorum configuration. This means that once a cluster has been upgraded with servers all running Raft protocol version 3, it will no longer allow servers running any older Raft protocol versions to be added. If running a single Nomad server, restarting it in-place will result in that server not being able to elect itself as a leader. To avoid this, either set the Raft protocol back to 2, or use [Manual Recovery Using peers.json](/guides/operations/outage.html#manual-recovery-using-peers-json) to map the server to its node ID in the Raft quorum configuration.
### Node Draining Improvements
@ -78,7 +78,7 @@ The `drain` command now blocks until the drain completes. To get the Nomad
-force -detach <node-id>`
See the [`migrate` stanza documentation][migrate] and [Decommissioning Nodes
guide](/guides/node-draining.html) for details.
guide](/guides/operations/node-draining.html) for details.
### Periods in Environment Variable Names No Longer Escaped
@ -124,7 +124,7 @@ as the old style will be deprecated in future versions of Nomad.
### RPC Advertise Address
The behavior of the [advertised RPC
address](/docs/agent/configuration/index.html#rpc-1) has changed to be only used
address](/docs/configuration/index.html#rpc-1) has changed to be only used
to advertise the RPC address of servers to client nodes. Server to server
communication is done using the advertised Serf address. Existing clusters
should not be affected but the advertised RPC address may need to be updated to
@ -149,7 +149,7 @@ If you manually configure `advertise` addresses no changes are necessary.
The change to the default, advertised IP also effect clients that do not specify
which network_interface to use. If you have several routable IPs, it is advised
to configure the client's [network
interface](https://www.nomadproject.io/docs/agent/configuration/client.html#network_interface)
interface](/docs/configuration/client.html#network_interface)
such that tasks bind to the correct address.
## Nomad 0.5.5

View File

@ -1,9 +1,9 @@
---
layout: "docs"
layout: "guides"
page_title: "Vault Integration"
sidebar_current: "docs-vault-integration"
sidebar_current: "guides-operations-vault-integration"
description: |-
Learn how to integrate with HashiCorp Vault and retrieve Vault tokens for
Learn how to integrate Nomad with HashiCorp Vault and retrieve Vault tokens for
tasks.
---
@ -341,8 +341,8 @@ You can see examples of `v1` and `v2` syntax in the
[auth]: https://www.vaultproject.io/docs/auth/token.html "Vault Authentication Backend"
[config]: /docs/agent/configuration/vault.html "Nomad Vault Configuration Block"
[createfromrole]: /docs/agent/configuration/vault.html#create_from_role "Nomad vault create_from_role Configuration Flag"
[config]: /docs/configuration/vault.html "Nomad Vault Configuration Block"
[createfromrole]: /docs/configuration/vault.html#create_from_role "Nomad vault create_from_role Configuration Flag"
[template]: /docs/job-specification/template.html "Nomad template Job Specification"
[vault]: https://www.vaultproject.io/ "Vault by HashiCorp"
[vault-spec]: /docs/job-specification/vault.html "Nomad Vault Job Specification"

View File

@ -1,14 +1,14 @@
---
layout: "guides"
page_title: "ACLs"
sidebar_current: "guides-acl"
page_title: "Access Control"
sidebar_current: "guides-security-acl"
description: |-
Nomad provides an optional Access Control List (ACL) system which can be used to control
access to data and APIs. The ACL is Capability-based, relying on tokens which are
associated with policies to determine which fine grained rules can be applied.
---
# ACL System
# Access Control
Nomad provides an optional Access Control List (ACL) system which can be used to control access to data and APIs. The ACL is [Capability-based](https://en.wikipedia.org/wiki/Capability-based_security), relying on tokens which are associated with policies to determine which fine grained rules can be applied. Nomad's capability based ACL system is very similar to the design of [AWS IAM](https://aws.amazon.com/iam/).
@ -56,15 +56,15 @@ Constructing rules from these policies is covered in detail in the Rule Specific
Nomad supports multi-datacenter and multi-region configurations. A single region is able to service multiple datacenters, and all servers in a region replicate their state between each other. In a multi-region configuration, there is a set of servers per region. Each region operates independently and is loosely coupled to allow jobs to be scheduled in any region and requests to flow transparently to the correct region.
When ACLs are enabled, Nomad depends on an "authoritative region" to act as a single source of truth for ACL policies and global ACL tokens. The authoritative region is configured in the [`server` stanza](/docs/agent/configuration/server.html) of agents, and all regions must share a single authoritative source. Any ACL policies or global ACL tokens are created in the authoritative region first. All other regions replicate ACL policies and global ACL tokens to act as local mirrors. This allows policies to be administered centrally, and for enforcement to be local to each region for low latency.
When ACLs are enabled, Nomad depends on an "authoritative region" to act as a single source of truth for ACL policies and global ACL tokens. The authoritative region is configured in the [`server` stanza](/docs/configuration/server.html) of agents, and all regions must share a single authoritative source. Any ACL policies or global ACL tokens are created in the authoritative region first. All other regions replicate ACL policies and global ACL tokens to act as local mirrors. This allows policies to be administered centrally, and for enforcement to be local to each region for low latency.
Global ACL tokens are used to allow cross region requests. Standard ACL tokens are created in a single target region and not replicated. This means if a request takes place between regions, global tokens must be used so that both regions will have the token registered.
# Configuring ACLs
ACLs are not enabled by default, and must be enabled. Clients and Servers need to set `enabled` in the [`acl` stanza](/docs/agent/configuration/acl.html). This enables the [ACL Policy](/api/acl-policies.html) and [ACL Token](/api/acl-tokens.html) APIs, as well as endpoint enforcement.
ACLs are not enabled by default, and must be enabled. Clients and Servers need to set `enabled` in the [`acl` stanza](/docs/configuration/acl.html). This enables the [ACL Policy](/api/acl-policies.html) and [ACL Token](/api/acl-tokens.html) APIs, as well as endpoint enforcement.
For multi-region configurations, all servers must be configured to use a single [authoritative region](/docs/agent/configuration/server.html#authoritative_region). The authoritative region is responsible for managing ACL policies and global tokens. Servers in other regions will replicate policies and global tokens to act as a mirror, and must have their [`replication_token`](/docs/agent/configuration/acl.html#replication_token) configured.
For multi-region configurations, all servers must be configured to use a single [authoritative region](/docs/configuration/server.html#authoritative_region). The authoritative region is responsible for managing ACL policies and global tokens. Servers in other regions will replicate policies and global tokens to act as a mirror, and must have their [`replication_token`](/docs/configuration/acl.html#replication_token) configured.
# Bootstrapping ACLs
@ -74,9 +74,9 @@ Bootstrapping ACLs on a new cluster requires a few steps, outlined below:
The APIs needed to manage policies and tokens are not enabled until ACLs are enabled. To begin, we need to enable the ACLs on the servers. If a multi-region setup is used, the authoritative region should be enabled first. For each server:
1. Set `enabled = true` in the [`acl` stanza](/docs/agent/configuration/acl.html#enabled).
1. Set `authoritative_region` in the [`server` stanza](/docs/agent/configuration/server.html#authoritative_region).
1. For servers outside the authoritative region, set `replication_token` in the [`acl` stanza](/docs/agent/configuration/acl.html#replication_token). Replication tokens should be `management` type tokens which are either created in the authoritative region, or created as Global tokens.
1. Set `enabled = true` in the [`acl` stanza](/docs/configuration/acl.html#enabled).
1. Set `authoritative_region` in the [`server` stanza](/docs/configuration/server.html#authoritative_region).
1. For servers outside the authoritative region, set `replication_token` in the [`acl` stanza](/docs/configuration/acl.html#replication_token). Replication tokens should be `management` type tokens which are either created in the authoritative region, or created as Global tokens.
1. Restart the Nomad server to pick up the new configuration.
Please take care to restart the servers one at a time, and ensure each server has joined and is operating correctly before restarting another.
@ -103,7 +103,7 @@ The bootstrap token is a `management` type token, meaning it can perform any ope
### Enable ACLs on Nomad Clients
To enforce client endpoints, we need to enable ACLs on clients as well. This is simpler than servers, and we just need to set `enabled = true` in the [`acl` stanza](/docs/agent/configuration/acl.html). Once configured, we need to restart the client for the change.
To enforce client endpoints, we need to enable ACLs on clients as well. This is simpler than servers, and we just need to set `enabled = true` in the [`acl` stanza](/docs/configuration/acl.html). Once configured, we need to restart the client for the change.
### Set an Anonymous Policy (Optional)

View File

@ -1,12 +1,12 @@
---
layout: "docs"
page_title: "Gossip and RPC Encryption"
sidebar_current: "docs-agent-encryption"
layout: "guides"
page_title: "Encryption Overview"
sidebar_current: "guides-security-encryption"
description: |-
Learn how to configure Nomad to encrypt HTTP, RPC, and Serf traffic.
---
# Encryption
# Encryption Overview
The Nomad agent supports encrypting all of its network traffic. There are
two separate encryption systems, one for gossip traffic, and one for HTTP and
@ -16,7 +16,7 @@ RPC.
Enabling gossip encryption only requires that you set an encryption key when
starting the Nomad server. The key can be set via the
[`encrypt`](/docs/agent/configuration/server.html#encrypt) parameter: the value
[`encrypt`](/docs/configuration/server.html#encrypt) parameter: the value
of this setting is a server configuration file containing the encryption key.
The key must be 16 bytes, base64 encoded. As a convenience, Nomad provides the
@ -88,5 +88,5 @@ as it is unable to use client certificates.
Read the [Securing Nomad with TLS Guide][guide] for details on how to configure
encryption for Nomad.
[guide]: /guides/securing-nomad.html "Securing Nomad with TLS"
[tls]: /docs/agent/configuration/tls.html "Nomad TLS Configuration"
[guide]: /guides/security/securing-nomad.html "Securing Nomad with TLS"
[tls]: /docs/configuration/tls.html "Nomad TLS Configuration"

View File

@ -0,0 +1,13 @@
---
layout: "guides"
page_title: "Security and Governance"
sidebar_current: "guides-security"
description: |-
Learn how to use Nomad safely and securely in a multi-team setting.
---
# Security and Governance
The Nomad Security and Governance guides section provides best practices and
guidance for operating Nomad safely and securely in a multi-team setting. Please
navigate the appropriate sub-sections for more information.

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Namespaces"
sidebar_current: "guides-namespaces"
sidebar_current: "guides-security-namespaces"
description: |-
Nomad Enterprise provides support for namespaces, which allow jobs and their
associated objects to be segmented from each other and other users of the
@ -27,7 +27,7 @@ When combined with ACLs, the isolation of namespaces can be enforced, only
allowing designated users access to read or modify the jobs and associated
objects in a namespace.
When [resource quotas](/guides/quotas.html) are applied to a namespace they
When [resource quotas](/guides/security/quotas.html) are applied to a namespace they
provide a means to limit resource consumption by the jobs in the namespace. This
can prevent a single actor from consuming excessive cluster resources and
negatively impacting other teams and applications sharing the cluster.
@ -38,9 +38,9 @@ Nomad places all jobs and their derived objects into namespaces. These include
jobs, allocations, deployments, and evaluations.
Nomad does not namespace objects that are shared across multiple namespaces.
This includes nodes, [ACL policies](/guides/acl.html), [Sentinel
policies](/guides/sentinel-policy.html), and [quota
specifications](/guides/quotas.html).
This includes nodes, [ACL policies](/guides/security/acl.html), [Sentinel
policies](/guides/security/sentinel-policy.html), and [quota
specifications](/guides/security/quotas.html).
## Working with Namespaces
@ -104,7 +104,7 @@ rails-www service 50 running 09/17/17 19:17:46 UTC
### ACLs
Access to namespaces can be restricted using [ACLs](/guides/acl.html). As an
Access to namespaces can be restricted using [ACLs](/guides/security/acl.html). As an
example we could create an ACL policy that allows full access to the QA
environment for our web namespaces but restrict the production access by
creating the following policy:

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Resource Quotas"
sidebar_current: "guides-quotas"
sidebar_current: "guides-security-quotas"
description: |-
Nomad Enterprise provides support for resource quotas, which allow operators
to restrict the aggregate resource usage of namespaces.
@ -21,7 +21,7 @@ This is not present in the open source version of Nomad.
When many teams or users are sharing Nomad clusters, there is the concern that a
single user could use more than their fair share of resources. Resource quotas
provide a mechanism for cluster administrators to restrict the resources that a
[namespace](/guides/namespaces.html) has access to.
[namespace](/guides/security/namespaces.html) has access to.
## Quotas Objects
@ -172,7 +172,7 @@ allocation since that would cause the quota to be oversubscribed on memory.
### ACLs
Access to quotas can be restricted using [ACLs](/guides/acl.html). As an
Access to quotas can be restricted using [ACLs](/guides/security/acl.html). As an
example we could create an ACL policy that allows read-only access to quotas.
```
@ -201,7 +201,7 @@ When specifying resource limits the following enforcement behaviors are defined:
Nomad makes working with quotas in a federated cluster simple by replicating
quota specifications from the [authoritative Nomad
region](/docs/agent/configuration/server.html#authoritative_region). This allows
region](/docs/configuration/server.html#authoritative_region). This allows
operators to interact with a single cluster but create quota specifications that
apply to all Nomad clusters.

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Securing Nomad with TLS"
sidebar_current: "guides-securing-nomad"
sidebar_current: "guides-security-tls"
description: |-
Securing Nomad's cluster communication with TLS is important for both
security and easing operations. Nomad can use mutual TLS (mTLS) for
@ -352,7 +352,7 @@ not use TLS:
Nomad server's gossip protocol use a shared key instead of TLS for encryption.
This encryption key must be added to every server's configuration using the
[`encrypt`](/docs/agent/configuration/server.html#encrypt) parameter or with
[`encrypt`](/docs/configuration/server.html#encrypt) parameter or with
the [`-encrypt` command line option](/docs/commands/agent.html).
The Nomad CLI includes a `operator keygen` command for generating a new secure gossip
@ -499,16 +499,16 @@ connections) once the entire cluster has been migrated.
[cfssl]: https://cfssl.org/
[cfssl.json]: https://raw.githubusercontent.com/hashicorp/nomad/master/demo/vagrant/cfssl.json
[guide-install]: https://www.nomadproject.io/intro/getting-started/install.html
[guide-cluster]: https://www.nomadproject.io/intro/getting-started/cluster.html
[guide-install]: /intro/getting-started/install.html
[guide-cluster]: /intro/getting-started/cluster.html
[guide-server]: https://raw.githubusercontent.com/hashicorp/nomad/master/demo/vagrant/server.hcl
[heartbeat_grace]: /docs/agent/configuration/server.html#heartbeat_grace
[heartbeat_grace]: /docs/configuration/server.html#heartbeat_grace
[letsencrypt]: https://letsencrypt.org/
[rpc_upgrade_mode]: https://www.nomadproject.io/docs/agent/configuration/tls.html#rpc_upgrade_mode/
[rpc_upgrade_mode]: /docs/configuration/tls.html#rpc_upgrade_mode/
[tls]: https://en.wikipedia.org/wiki/Transport_Layer_Security
[tls_block]: /docs/agent/configuration/tls.html
[tls_block]: /docs/configuration/tls.html
[vagrantfile]: https://raw.githubusercontent.com/hashicorp/nomad/master/demo/vagrant/Vagrantfile
[vault]: https://www.vaultproject.io/
[vault-pki]: https://www.vaultproject.io/docs/secrets/pki/index.html
[verify_https_client]: /docs/agent/configuration/tls.html#verify_https_client
[verify_server_hostname]: /docs/agent/configuration/tls.html#verify_server_hostname
[verify_https_client]: /docs/configuration/tls.html#verify_https_client
[verify_server_hostname]: /docs/configuration/tls.html#verify_server_hostname

View File

@ -1,14 +1,14 @@
---
layout: "guides"
page_title: "Sentinel Policies"
sidebar_current: "guides-sentinel"
sidebar_current: "guides-security-sentinel"
description: |-
Nomad integrates with Sentinel for fine-grained policy enforcement. Sentinel allows operators to express their policies as code, and have their policies automatically enforced. This allows operators to define a "sandbox" and restrict actions to only those compliant with policy. The Sentinel integration builds on the ACL System.
---
# Sentinel Policies
[Nomad Enterprise](https://www.hashicorp.com/products/nomad/) integrates with [HashiCorp Sentinel](https://docs.hashicorp.com/sentinel) for fine-grained policy enforcement. Sentinel allows operators to express their policies as code, and have their policies automatically enforced. This allows operators to define a "sandbox" and restrict actions to only those compliant with policy. The Sentinel integration builds on the [ACL System](/guides/acl.html).
[Nomad Enterprise](/docs/enterprise/index.html) integrates with [HashiCorp Sentinel](https://docs.hashicorp.com/sentinel) for fine-grained policy enforcement. Sentinel allows operators to express their policies as code, and have their policies automatically enforced. This allows operators to define a "sandbox" and restrict actions to only those compliant with policy. The Sentinel integration builds on the [ACL System](/guides/security/acl.html).
~> **Enterprise Only!** This functionality only exists in Nomad Enterprise.
This is not present in the open source version of Nomad.
@ -55,23 +55,23 @@ The following table summarizes the enforcement levels that are available:
| soft-mandatory | Prevents operation when a policy fails, issues a warning if overridden |
| hard-mandatory | Prevents operation when a policy fails |
The [`sentinel-override` capability](/guides/acl.html#sentinel-override) is required to override a `soft-mandatory` policy. This allows a restricted set of users to have override capability when necessary.
The [`sentinel-override` capability](/guides/security/acl.html#sentinel-override) is required to override a `soft-mandatory` policy. This allows a restricted set of users to have override capability when necessary.
## Multi-Region Configuration
Nomad supports multi-datacenter and multi-region configurations. A single region is able to service multiple datacenters, and all servers in a region replicate their state between each other. In a multi-region configuration, there is a set of servers per region. Each region operates independently and is loosely coupled to allow jobs to be scheduled in any region and requests to flow transparently to the correct region.
When ACLs are enabled, Nomad depends on an "authoritative region" to act as a single source of truth for ACL policies, global ACL tokens, and Sentinel policies. The authoritative region is configured in the [`server` stanza](/docs/agent/configuration/server.html) of agents, and all regions must share a single authoritative source. Any Sentinel policies are created in the authoritative region first. All other regions replicate Sentinel policies, ACL policies, and global ACL tokens to act as local mirrors. This allows policies to be administered centrally, and for enforcement to be local to each region for low latency.
When ACLs are enabled, Nomad depends on an "authoritative region" to act as a single source of truth for ACL policies, global ACL tokens, and Sentinel policies. The authoritative region is configured in the [`server` stanza](/docs/configuration/server.html) of agents, and all regions must share a single authoritative source. Any Sentinel policies are created in the authoritative region first. All other regions replicate Sentinel policies, ACL policies, and global ACL tokens to act as local mirrors. This allows policies to be administered centrally, and for enforcement to be local to each region for low latency.
## Configuring Sentinel Policies
Sentinel policies are tied to the ACL system, which is not enabled by default.
See the [ACL guide](/guides/acl.html) for details on how to configure ACLs.
See the [ACL guide](/guides/security/acl.html) for details on how to configure ACLs.
## Example: Installing Sentinel Policies
This example shows how to install a Sentinel policy. It assumes that ACLs have already
been bootstrapped (see the [ACL guide](/guides/acl.html)), and that a `NOMAD_TOKEN` environment variable
been bootstrapped (see the [ACL guide](/guides/security/acl.html)), and that a `NOMAD_TOKEN` environment variable
is set to a management token.
First, create a Sentinel policy, named `test.sentinel`:
@ -205,5 +205,5 @@ The following objects are made available in the `submit-job` scope:
| ------ | ------------------------- |
| `job` | The job being submitted |
See the [Sentinel Job Object](/guides/sentinel/job.html) for details on the fields that are available.
See the [Sentinel Job Object](/guides/security/sentinel/job.html) for details on the fields that are available.

View File

@ -1,7 +1,7 @@
---
layout: "guides"
page_title: "Sentinel Job Object"
sidebar_current: "guides-sentinel-job"
sidebar_current: "guides-security-sentinel-job"
description: |-
Job objects can be introspected to apply fine grained Sentinel policies.
---

View File

@ -40,30 +40,30 @@ the first Nomad server contacted.
- `spark.nomad.docker.email` `(string: nil)` - Specifies the email address to
use when downloading the Docker image specified by
[spark.nomad.dockerImage](#spark.nomad.dockerImage). See the
[Docker driver authentication](https://www.nomadproject.io/docs/drivers/docker.html#authentication)
[Docker driver authentication](/docs/drivers/docker.html#authentication)
docs for more information.
- `spark.nomad.docker.password` `(string: nil)` - Specifies the password to use
when downloading the Docker image specified by
[spark.nomad.dockerImage](#spark.nomad.dockerImage). See the
[Docker driver authentication](https://www.nomadproject.io/docs/drivers/docker.html#authentication)
[Docker driver authentication](/docs/drivers/docker.html#authentication)
docs for more information.
- `spark.nomad.docker.serverAddress` `(string: nil)` - Specifies the server
address (domain/IP without the protocol) to use when downloading the Docker
image specified by [spark.nomad.dockerImage](#spark.nomad.dockerImage). Docker
Hub is used by default. See the
[Docker driver authentication](https://www.nomadproject.io/docs/drivers/docker.html#authentication)
[Docker driver authentication](/docs/drivers/docker.html#authentication)
docs for more information.
- `spark.nomad.docker.username` `(string: nil)` - Specifies the username to use
when downloading the Docker image specified by
[spark.nomad.dockerImage](#spark-nomad-dockerImage). See the
[Docker driver authentication](https://www.nomadproject.io/docs/drivers/docker.html#authentication)
[Docker driver authentication](/docs/drivers/docker.html#authentication)
docs for more information.
- `spark.nomad.dockerImage` `(string: nil)` - Specifies the `URL` for the
[Docker image](https://www.nomadproject.io/docs/drivers/docker.html#image) to
[Docker image](/docs/drivers/docker.html#image) to
use to run Spark with Nomad's `docker` driver. When not specified, Nomad's
`exec` driver will be used instead.

View File

@ -117,7 +117,7 @@ DataNodes to generically reference the NameNode:
```
Another viable option for DataNode task group is to use a dedicated
[system](https://www.nomadproject.io/docs/runtime/schedulers.html#system) job.
[system](/docs/schedulers.html#system) job.
This will deploy a DataNode to every client node in the system, which may or may
not be desirable depending on your use case.

View File

@ -127,9 +127,9 @@ $ spark-submit \
Nomad clients collect the `stderr` and `stdout` of running tasks. The CLI or the
HTTP API can be used to inspect logs, as documented in
[Accessing Logs](https://www.nomadproject.io/guides/operating-a-job/accessing-logs.html).
[Accessing Logs](/guides/operating-a-job/accessing-logs.html).
In cluster mode, the `stderr` and `stdout` of the `driver` application can be
accessed in the same way. The [Log Shipper Pattern](https://www.nomadproject.io/guides/operating-a-job/accessing-logs.html#log-shipper-pattern) uses sidecar tasks to forward logs to a central location. This
accessed in the same way. The [Log Shipper Pattern](/guides/operating-a-job/accessing-logs.html#log-shipper-pattern) uses sidecar tasks to forward logs to a central location. This
can be done using a job template as follows:
```hcl

View File

@ -10,7 +10,7 @@ description: |-
Nomad is well-suited for analytical workloads, given its [performance
characteristics](https://www.hashicorp.com/c1m/) and first-class support for
[batch scheduling](https://www.nomadproject.io/docs/runtime/schedulers.html).
[batch scheduling](/docs/schedulers.html).
Apache Spark is a popular data processing engine/framework that has been
architected to use third-party schedulers. The Nomad ecosystem includes a
[fork of Apache Spark](https://github.com/hashicorp/nomad-spark) that natively

View File

@ -12,7 +12,7 @@ description: |-
The Nomad Web UI offers an easy to use web experience for inspecting a Nomad cluster.
Jobs, Deployments, Evaluations, Task Groups, Allocations, Logs, Clients, and Servers can all be
monitored from the Web UI. The Web UI also supports the use of ACL tokens for
clusters that are using the [ACL system](/guides/acl.html).
clusters that are using the [ACL system](/guides/security/acl.html).
## Accessing the Web UI

View File

@ -15,14 +15,17 @@ to use to improve your environment.
We've covered the basics of all the core features of Nomad in this guide.
We recommend exploring the following resources as next steps.
* [Documentation](/docs/index.html) - The documentation is an in-depth
reference guide to all the features of Nomad.
* [Guides](/guides/index.html) - The Guides provide best practices and
guidance for using and operating Nomad in a real-world production setting.
* [Creating a Cluster](/guides/cluster/bootstrapping.html) - Additional details on
creating a production worthy Nomad Cluster.
* [Docs](/docs/index.html) - The Docs provide detailed reference information
all available features and options of Nomad.
* [Operating a Job](/guides/operating-a-job/index.html) - Additional details on how to
run a job in production.
* [Job Lifecycle](/guides/operating-a-job/index.html) - Additional details
specific to running a job in production.
* [Creating a Cluster](/guides/operations/cluster/bootstrapping.html) - Additional
details on creating a production worthy Nomad Cluster.
* [Example Terraform configuration](https://github.com/hashicorp/nomad/tree/master/terraform) -
Use Terraform to automatically provision a cluster in AWS.

View File

@ -110,7 +110,7 @@ Additional metadata can be viewed by providing the `-detailed` flag.
You can use `Ctrl-C` (the interrupt signal) to halt the agent.
By default, all signals will cause the agent to forcefully shutdown.
The agent [can be configured](/docs/agent/configuration/index.html#leave_on_terminate) to
The agent [can be configured](/docs/configuration/index.html#leave_on_terminate) to
gracefully leave on either the interrupt or terminate signals.
After interrupting the agent, you should see it leave the cluster
@ -134,7 +134,7 @@ replication continues to be attempted until the node recovers. Nomad will
automatically try to reconnect to _failed_ nodes, allowing it to recover from
certain network conditions, while _left_ nodes are no longer contacted.
If an agent is operating as a server, [`leave_on_terminate`](/docs/agent/configuration/index.html#leave_on_terminate) should only
If an agent is operating as a server, [`leave_on_terminate`](/docs/configuration/index.html#leave_on_terminate) should only
be set if the server will never rejoin the cluster again. The default value of `false` for `leave_on_terminate` and `leave_on_interrupt`
work well for most scenarios. If Nomad servers are part of an auto scaling group where new servers are brought up to replace
failed servers, using graceful leave avoids causing a potential availability outage affecting the [consensus protocol](/docs/internals/consensus.html).

View File

@ -1,12 +1,12 @@
---
layout: "intro"
page_title: "Nomad Web UI"
page_title: "Web UI"
sidebar_current: "getting-started-ui"
description: |-
Visit the Nomad Web UI to inspect jobs, allocations, and more.
---
# Nomad Web UI
# Web UI
At this point we have a fully functioning cluster with a job running in it. We have
learned how to inspect a job using `nomad status`, next we'll learn how to inspect

View File

@ -13,16 +13,37 @@ place to start with Nomad. We cover what Nomad is, what
problems it can solve, how it compares to existing software,
and contains a quick start for using Nomad.
If you are already familiar with the basics of Nomad, the
[documentation](/docs/index.html) provides a better reference
guide for all available features as well as internals.
If you are already familiar with the basics of Nomad, the [Guides](/guides/index.html)
and the [reference documentation](/docs/index.html) will provide a more comprehensive
resource.
## What is Nomad?
Nomad is a tool for managing a cluster of machines and running applications
Nomad is a flexible container orchestration tool that enables an organization to
easily deploy and manage any containerized or legacy application using a single,
unified workflow. Nomad can run a diverse workload of Docker, non-containerized,
microservice, and batch applications, and generally offers the following benefits
to developers and operators:
* **API-driven Automation**: Workload placement, scaling, and upgrades can be
automated, simplifying operations and eliminating the need for homegrown tooling.
* **Self-service Deployments**: Developers are empowered to service application
lifecycles directly, allowing operators to focus on higher value tasks.
* **Workload Reliability**: Application, node, and driver failures are handled
automatically, reducing the need for manual operator intervention.
* **Increased Efficiency and Reduced Cost**: Higher application densities allow
operators to reduce fleet sizes and save money.
Nomad is trusted by enterprises from a range of sectors including financial,
retail, software, and others to run production workloads at scale across private
infrastructure and the public cloud.
## How it Works
At its core, Nomad is a tool for managing a cluster of machines and running applications
on them. Nomad abstracts away machines and the location of applications,
and instead enables users to declare what they want to run and Nomad handles
where they should run and how to run them.
and instead enables users to declare what they want to run while Nomad handles
where and how to run them.
The key features of Nomad are:
@ -57,6 +78,15 @@ The key features of Nomad are:
to support demanding workloads. Nomad has been proven to scale to cluster sizes that
exceed 10k nodes in real-world production environments.
## How Nomad Compares to Other Tools
Nomad differentiates from related tools by virtue of its **simplicity**, **flexibility**,
**scalability**, and **high performance**. Nomad's synergy and integration points with
HashiCorp Terraform, Consul, and Vault make it uniquely suited for easy integration into
an organization's existing workflows, minimizing the time-to-market for critical initiatives.
See the [Nomad vs. Other Software](/intro/vs/index.html) page for additional details and
comparisons.
## Next Steps
See the page on [Nomad use cases](/intro/use-cases.html) to see the

View File

@ -3,35 +3,70 @@ layout: "intro"
page_title: "Use Cases"
sidebar_current: "use-cases"
description: |-
This page lists some concrete use cases for Nomad, but the possible use cases are much broader than what we cover.
This page lists some concrete use cases for Nomad, but the possible use cases
are much broader than what we cover.
---
# Use Cases
Before understanding use cases, it's useful to know [what Nomad is](/intro/index.html).
This page lists some concrete use cases for Nomad, but the possible use cases are
much broader than what we cover.
This page lists Nomad's core use cases. Please note that the full range of potential
use cases is much broader than what is currently covered here. Reading through the
[Introduction to Nomad](/intro/index.html) is highly recommended before diving into
the use cases.
## Microservices Platform
## Docker Container Management
Microservices, or Service Oriented Architectures (SOA), are a design paradigm in which many
services with narrow scope, tight state encapsulation, and API driven interfaces interact together
to form a larger application. However, they add an operational challenge of managing hundreds
or thousands of services instead of a few large applications. Nomad provides a platform for
managing microservices, making it easier to adopt the paradigm.
Organizations are increasingly moving towards a Docker centric workflow for
application deployment and management. This transition requires new tooling
to automate placement, perform job updates, enable self-service for developers,
and to handle failures automatically. Nomad supports a [first-class Docker workflow](/docs/drivers/docker.html)
and integrates seamlessly with [Consul](/guides/operations/consul-integration/index.html)
and [Vault](/guides/operations/vault-integration/index.html) to enable a complete solution
while maximizing operational flexibility. Nomad is easy to use, can scale to
thousands of nodes in a single cluster, and can easily deploy across private data
centers and multiple clouds.
## Hybrid Cloud Deployments
## Legacy Application Deployment
Nomad is designed to handle multi-datacenter and multi-region deployments and is cloud agnostic.
This allows Nomad to schedule in private datacenters running bare metal, OpenStack, or VMware
alongside an AWS, Azure, or GCE cloud deployment. This makes it easier to migrate workloads
incrementally, or to utilize the cloud for bursting.
A virtual machine based application deployment strategy can lead to low hardware
utilization rates and high infrastructure costs. While a Docker-based deployment
strategy can be impractical for some organizations or use cases, the potential for
greater automation, increased resilience, and reduced cost is very attractive.
Nomad natively supports running legacy applications, static binaries, JARs, and
simple OS commands directly. Workloads are natively isolated at runtime and bin
packed to maximize efficiency and utilization (reducing cost). Developers and
operators benefit from API-driven automation and enhanced reliability for
applications through automatic failure handling.
## Microservices
Microservices and Service Oriented Architectures (SOA) are a design paradigm in
which many services with narrow scope, tight state encapsulation, and API driven
communication interact together to form a larger solution. However, managing hundreds
or thousands of services instead of a few large applications creates an operational
challenge. Nomad elegantly integrates with [Consul](/guides/operations/consul-integration/index.html)
for automatic service registration and dynamic rendering of configuration files. Nomad
and Consul together provide an ideal solution for managing microservices, making it
easier to adopt the paradigm.
## Batch Processing Workloads
As data science and analytics teams grow in size and complexity, they increasingly
benefit from highly performant and scalable tools that can run batch workloads with
minimal operational overhead. Nomad can natively run batch jobs, [parameterized](https://www.hashicorp.com/blog/replacing-queues-with-nomad-dispatch) jobs, and [Spark](https://github.com/hashicorp/nomad-spark)
workloads. Nomad's architecture enables easy scalability and an optimistically
concurrent scheduling strategy that can yield [thousands of container deployments per
second](https://www.hashicorp.com/c1m). Alternatives are overly complex and limited
in terms of their scheduling throughput, scalability, and multi-cloud capabilities.
**Related video**: [End to End Production Nomad at Citadel](https://www.youtube.com/watch?reload=9&v=ZOBcGpGsboA)
## Multi-region and Multi-cloud Deployments
Nomad is designed to natively handle multi-datacenter and multi-region deployments
and is cloud agnostic. This allows Nomad to schedule in private datacenters running
bare metal, OpenStack, or VMware alongside an AWS, Azure, or GCE cloud deployment.
This makes it easier to migrate workloads incrementally and to utilize the cloud
for bursting.
## E-Commerce
A typical E-Commerce website has a few types of workloads. There are long-lived services
used for web serving. These include the load balancer, web frontends, API servers, and OLTP databases.
Batch processing using Hadoop or Spark may run periodically for business reporting, user targeting,
or generating product recommendations. Nomad allows all these workloads to share an underlying cluster,
increasing utilization, reducing cost, simplifying scaling and providing a clean abstraction
for developers.

View File

@ -8,16 +8,29 @@ description: |-
# Nomad vs. Other Software
Nomad is a cluster manager and scheduler. There are many related categories
including cluster managers, resource managers, workload managers, and schedulers.
There are many existing tools in each category, and the comparisons are not exhaustive
of the entire space.
The following characteristics generally differentiate Nomad from related products:
Due to the bias of the comparisons being on the Nomad website, we attempt
to only use facts. If you find something that is invalid or out of date
in the comparisons, please
[open an issue](https://github.com/hashicorp/nomad/issues) and we'll
* **Simplicity**: Nomad runs as a single process with zero external dependencies.
Operators can easily provision, manage, and scale Nomad. Developers can easily
define and run applications.
* **Flexibility**: Nomad can run a diverse workload of containerized, legacy,
microservice, and batch applications. Nomad can schedule service, batch
processing and system jobs, and can run on both Linux and Windows.
* **Scalability and High Performance**: Nomad can schedule thousands of containers
per second, scale to thousands of nodes in a single cluster, and easily federate
across regions and cloud providers.
* **HashiCorp Interoperability**: Nomad elegantly integrates with Vault for secrets
management and Consul for service discovery and dynamic configuration. Nomad's
Consul-like architecture and Terraform-like job specification lower the barrier
to entry for existing users of the HashiCorp stack.
There are many relevant categories for comparison including cluster managers,
resource managers, workload managers, and schedulers. There are many existing
tools in each category, and the comparisons are not exhaustive of the entire space.
Due to the bias of the comparisons being on the Nomad website, we attempt to only
use facts. If you find something that is invalid or out of date in the comparisons,
please [open an issue](https://github.com/hashicorp/nomad/issues) and we will
address it as soon as possible.
Use the navigation on the left to read comparisons of Nomad versus other
systems.
Use the navigation on the left to read comparisons of Nomad versus other systems.

View File

@ -1,139 +1,60 @@
<% wrap_layout :inner do %>
<% content_for :sidebar do %>
<ul class="nav docs-sidenav">
<li<%= sidebar_current("docs-installing") %>>
<a href="/docs/install/index.html">Installing Nomad</a>
</li>
<li<%= sidebar_current("docs-runtime") %>>
<a href="/docs/runtime/index.html">Runtime</a>
<li<%= sidebar_current("docs-internal") %>>
<a href="/docs/internals/index.html">Internals</a>
<ul class="nav">
<li<%= sidebar_current("docs-runtime-environment") %>>
<a href="/docs/runtime/environment.html">Environment</a>
<li<%= sidebar_current("docs-internals-architecture") %>>
<a href="/docs/internals/architecture.html">Architecture</a>
</li>
<li<%= sidebar_current("docs-runtime-interpolation") %>>
<a href="/docs/runtime/interpolation.html">Interpolation</a>
<li<%= sidebar_current("docs-internals-scheduling") %>>
<a href="/docs/internals/scheduling.html">Scheduling</a>
</li>
<li<%= sidebar_current("docs-runtime-schedulers") %>>
<a href="/docs/runtime/schedulers.html">Schedulers</a>
<li<%= sidebar_current("docs-internals-consensus") %>>
<a href="/docs/internals/consensus.html">Consensus Protocol</a>
</li>
<li<%= sidebar_current("docs-internals-gossip") %>>
<a href="/docs/internals/gossip.html">Gossip Protocol</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-job-specification") %>>
<a href="/docs/job-specification/index.html">Job Specification</a>
<li<%= sidebar_current("docs-configuration") %>>
<a href="/docs/configuration/index.html">Configuration</a>
<ul class="nav">
<li<%= sidebar_current("docs-job-specification-artifact")%>>
<a href="/docs/job-specification/artifact.html">artifact</a>
<li <%= sidebar_current("docs-configuration-acl") %>>
<a href="/docs/configuration/acl.html">acl</a>
</li>
<li<%= sidebar_current("docs-job-specification-check_restart")%>>
<a href="/docs/job-specification/check_restart.html">check_restart</a>
<li <%= sidebar_current("docs-configuration-autopilot") %>>
<a href="/docs/configuration/autopilot.html">autopilot</a>
</li>
<li<%= sidebar_current("docs-job-specification-constraint")%>>
<a href="/docs/job-specification/constraint.html">constraint</a>
<li <%= sidebar_current("docs-configuration-client") %>>
<a href="/docs/configuration/client.html">client</a>
</li>
<li<%= sidebar_current("docs-job-specification-dispatch-payload")%>>
<a href="/docs/job-specification/dispatch_payload.html">dispatch_payload</a>
<li <%= sidebar_current("docs-configuration-consul") %>>
<a href="/docs/configuration/consul.html">consul</a>
</li>
<li<%= sidebar_current("docs-job-specification-env")%>>
<a href="/docs/job-specification/env.html">env</a>
<li <%= sidebar_current("docs-configuration-sentinel") %>>
<a href="/docs/configuration/sentinel.html">sentinel</a>
</li>
<li<%= sidebar_current("docs-job-specification-ephemeral_disk")%>>
<a href="/docs/job-specification/ephemeral_disk.html">ephemeral_disk</a>
<li <%= sidebar_current("docs-configuration-server") %>>
<a href="/docs/configuration/server.html">server</a>
</li>
<li<%= sidebar_current("docs-job-specification-group")%>>
<a href="/docs/job-specification/group.html">group</a>
<li <%= sidebar_current("docs-configuration--server-join") %>>
<a href="/docs/configuration/server_join.html">server_join</a>
</li>
<li<%= sidebar_current("docs-job-specification-job")%>>
<a href="/docs/job-specification/job.html">job</a>
<li <%= sidebar_current("docs-configuration-telemetry") %>>
<a href="/docs/configuration/telemetry.html">telemetry</a>
</li>
<li<%= sidebar_current("docs-job-specification-logs")%>>
<a href="/docs/job-specification/logs.html">logs</a>
<li <%= sidebar_current("docs-configuration-tls") %>>
<a href="/docs/configuration/tls.html">tls</a>
</li>
<li<%= sidebar_current("docs-job-specification-meta")%>>
<a href="/docs/job-specification/meta.html">meta</a>
</li>
<li<%= sidebar_current("docs-job-specification-migrate")%>>
<a href="/docs/job-specification/migrate.html">migrate</a>
</li>
<li<%= sidebar_current("docs-job-specification-network")%>>
<a href="/docs/job-specification/network.html">network</a>
</li>
<li<%= sidebar_current("docs-job-specification-parameterized")%>>
<a href="/docs/job-specification/parameterized.html">parameterized</a>
</li>
<li<%= sidebar_current("docs-job-specification-periodic")%>>
<a href="/docs/job-specification/periodic.html">periodic</a>
</li>
<li<%= sidebar_current("docs-job-specification-reschedule")%>>
<a href="/docs/job-specification/reschedule.html">reschedule</a>
</li>
<li<%= sidebar_current("docs-job-specification-resources")%>>
<a href="/docs/job-specification/resources.html">resources</a>
</li>
<li<%= sidebar_current("docs-job-specification-restart")%>>
<a href="/docs/job-specification/restart.html">restart</a>
</li>
<li<%= sidebar_current("docs-job-specification-service")%>>
<a href="/docs/job-specification/service.html">service</a>
</li>
<li<%= sidebar_current("docs-job-specification-task")%>>
<a href="/docs/job-specification/task.html">task</a>
</li>
<li<%= sidebar_current("docs-job-specification-template")%>>
<a href="/docs/job-specification/template.html">template</a>
</li>
<li<%= sidebar_current("docs-job-specification-update")%>>
<a href="/docs/job-specification/update.html">update</a>
</li>
<li<%= sidebar_current("docs-job-specification-vault")%>>
<a href="/docs/job-specification/vault.html">vault</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-service-discovery") %>>
<a href="/docs/service-discovery/index.html">Service Discovery</a>
</li>
<li<%= sidebar_current("docs-vault-integration") %>>
<a href="/docs/vault-integration/index.html">Vault Integration</a>
</li>
<li<%= sidebar_current("docs-drivers") %>>
<a href="/docs/drivers/index.html">Drivers</a>
<ul class="nav">
<li<%= sidebar_current("docs-drivers-docker") %>>
<a href="/docs/drivers/docker.html">Docker</a>
</li>
<li<%= sidebar_current("docs-drivers-exec") %>>
<a href="/docs/drivers/exec.html">Isolated Fork/Exec</a>
</li>
<li<%= sidebar_current("docs-drivers-java") %>>
<a href="/docs/drivers/java.html">Java</a>
</li>
<li<%= sidebar_current("docs-drivers-lxc") %>>
<a href="/docs/drivers/lxc.html">LXC</a>
</li>
<li<%= sidebar_current("docs-drivers-qemu") %>>
<a href="/docs/drivers/qemu.html">Qemu</a>
</li>
<li<%= sidebar_current("docs-drivers-raw-exec") %>>
<a href="/docs/drivers/raw_exec.html">Raw Fork/Exec</a>
</li>
<li<%= sidebar_current("docs-drivers-rkt") %>>
<a href="/docs/drivers/rkt.html">Rkt</a>
</li>
<li<%= sidebar_current("docs-drivers-custom") %>>
<a href="/docs/drivers/custom.html">Custom</a>
<li <%= sidebar_current("docs-configuration-vault") %>>
<a href="/docs/configuration/vault.html">vault</a>
</li>
</ul>
</li>
@ -392,95 +313,128 @@
</ul>
</li>
<li<%= sidebar_current("docs-agent") %>>
<a href="/docs/agent/index.html">Nomad Agent</a>
<ul class="nav">
<li <%= sidebar_current("docs-agent-cloud-auto-join") %>>
<a href="/docs/agent/cloud_auto_join.html">Cloud Auto-join</a>
</li>
<li<%= sidebar_current("docs-agent-configuration") %>>
<a href="/docs/agent/configuration/index.html">Configuration</a>
<ul class="nav">
<li <%= sidebar_current("docs-agent-configuration-acl") %>>
<a href="/docs/agent/configuration/acl.html">acl</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-autopilot") %>>
<a href="/docs/agent/configuration/autopilot.html">autopilot</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-client") %>>
<a href="/docs/agent/configuration/client.html">client</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-consul") %>>
<a href="/docs/agent/configuration/consul.html">consul</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-sentinel") %>>
<a href="/docs/agent/configuration/sentinel.html">sentinel</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-server") %>>
<a href="/docs/agent/configuration/server.html">server</a>
</li>
<li <%= sidebar_current("docs-agent-configuration--server-join") %>>
<a href="/docs/agent/configuration/server_join.html">server_join</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-telemetry") %>>
<a href="/docs/agent/configuration/telemetry.html">telemetry</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-tls") %>>
<a href="/docs/agent/configuration/tls.html">tls</a>
</li>
<li <%= sidebar_current("docs-agent-configuration-vault") %>>
<a href="/docs/agent/configuration/vault.html">vault</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-agent-encryption") %>>
<a href="/docs/agent/encryption.html">Encryption</a>
</li>
<li<%= sidebar_current("docs-agent-telemetry") %>>
<a href="/docs/agent/telemetry.html">Telemetry</a>
</li>
</ul>
</li>
<hr>
<li<%= sidebar_current("docs-internal") %>>
<a href="/docs/internals/index.html">Internals</a>
<li<%= sidebar_current("docs-job-specification") %>>
<a href="/docs/job-specification/index.html">Job Specification</a>
<ul class="nav">
<li<%= sidebar_current("docs-internals-architecture") %>>
<a href="/docs/internals/architecture.html">Architecture</a>
<li<%= sidebar_current("docs-job-specification-artifact")%>>
<a href="/docs/job-specification/artifact.html">artifact</a>
</li>
<li<%= sidebar_current("docs-internals-consensus") %>>
<a href="/docs/internals/consensus.html">Consensus Protocol</a>
<li<%= sidebar_current("docs-job-specification-check_restart")%>>
<a href="/docs/job-specification/check_restart.html">check_restart</a>
</li>
<li<%= sidebar_current("docs-internals-gossip") %>>
<a href="/docs/internals/gossip.html">Gossip Protocol</a>
<li<%= sidebar_current("docs-job-specification-constraint")%>>
<a href="/docs/job-specification/constraint.html">constraint</a>
</li>
<li<%= sidebar_current("docs-internals-scheduling") %>>
<a href="/docs/internals/scheduling.html">Scheduling</a>
<li<%= sidebar_current("docs-job-specification-dispatch-payload")%>>
<a href="/docs/job-specification/dispatch_payload.html">dispatch_payload</a>
</li>
<li<%= sidebar_current("docs-job-specification-env")%>>
<a href="/docs/job-specification/env.html">env</a>
</li>
<li<%= sidebar_current("docs-job-specification-ephemeral_disk")%>>
<a href="/docs/job-specification/ephemeral_disk.html">ephemeral_disk</a>
</li>
<li<%= sidebar_current("docs-job-specification-group")%>>
<a href="/docs/job-specification/group.html">group</a>
</li>
<li<%= sidebar_current("docs-job-specification-job")%>>
<a href="/docs/job-specification/job.html">job</a>
</li>
<li<%= sidebar_current("docs-job-specification-logs")%>>
<a href="/docs/job-specification/logs.html">logs</a>
</li>
<li<%= sidebar_current("docs-job-specification-meta")%>>
<a href="/docs/job-specification/meta.html">meta</a>
</li>
<li<%= sidebar_current("docs-job-specification-migrate")%>>
<a href="/docs/job-specification/migrate.html">migrate</a>
</li>
<li<%= sidebar_current("docs-job-specification-network")%>>
<a href="/docs/job-specification/network.html">network</a>
</li>
<li<%= sidebar_current("docs-job-specification-parameterized")%>>
<a href="/docs/job-specification/parameterized.html">parameterized</a>
</li>
<li<%= sidebar_current("docs-job-specification-periodic")%>>
<a href="/docs/job-specification/periodic.html">periodic</a>
</li>
<li<%= sidebar_current("docs-job-specification-reschedule")%>>
<a href="/docs/job-specification/reschedule.html">reschedule</a>
</li>
<li<%= sidebar_current("docs-job-specification-resources")%>>
<a href="/docs/job-specification/resources.html">resources</a>
</li>
<li<%= sidebar_current("docs-job-specification-restart")%>>
<a href="/docs/job-specification/restart.html">restart</a>
</li>
<li<%= sidebar_current("docs-job-specification-service")%>>
<a href="/docs/job-specification/service.html">service</a>
</li>
<li<%= sidebar_current("docs-job-specification-task")%>>
<a href="/docs/job-specification/task.html">task</a>
</li>
<li<%= sidebar_current("docs-job-specification-template")%>>
<a href="/docs/job-specification/template.html">template</a>
</li>
<li<%= sidebar_current("docs-job-specification-update")%>>
<a href="/docs/job-specification/update.html">update</a>
</li>
<li<%= sidebar_current("docs-job-specification-vault")%>>
<a href="/docs/job-specification/vault.html">vault</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-upgrade") %>>
<a href="/docs/upgrade/index.html">Upgrading</a>
<li<%= sidebar_current("docs-drivers") %>>
<a href="/docs/drivers/index.html">Task Drivers</a>
<ul class="nav">
<li<%= sidebar_current("docs-upgrade-upgrading") %>>
<a href="/docs/upgrade/index.html">Upgrading Nomad</a>
<li<%= sidebar_current("docs-drivers-docker") %>>
<a href="/docs/drivers/docker.html">Docker</a>
</li>
<li<%= sidebar_current("docs-upgrade-specific") %>>
<a href="/docs/upgrade/upgrade-specific.html">Specific Version Details</a>
<li<%= sidebar_current("docs-drivers-exec") %>>
<a href="/docs/drivers/exec.html">Isolated Fork/Exec</a>
</li>
<li<%= sidebar_current("docs-drivers-java") %>>
<a href="/docs/drivers/java.html">Java</a>
</li>
<li<%= sidebar_current("docs-drivers-lxc") %>>
<a href="/docs/drivers/lxc.html">LXC</a>
</li>
<li<%= sidebar_current("docs-drivers-qemu") %>>
<a href="/docs/drivers/qemu.html">Qemu</a>
</li>
<li<%= sidebar_current("docs-drivers-raw-exec") %>>
<a href="/docs/drivers/raw_exec.html">Raw Fork/Exec</a>
</li>
<li<%= sidebar_current("docs-drivers-rkt") %>>
<a href="/docs/drivers/rkt.html">Rkt</a>
</li>
<li<%= sidebar_current("docs-drivers-custom") %>>
<a href="/docs/drivers/custom.html">Custom</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-faq") %>>
<a href="/docs/faq.html">FAQ</a>
<li<%= sidebar_current("docs-schedulers") %>>
<a href="/docs/schedulers.html">Schedulers</a>
</li>
<li<%= sidebar_current("docs-runtime-environment") %>>
<a href="/docs/runtime/environment.html">Runtime Environment</a>
</li>
<li<%= sidebar_current("docs-variable-interpolation") %>>
<a href="/docs/runtime/interpolation.html">Variable Interpolation</a>
</li>
<hr>
@ -503,6 +457,10 @@
</ul>
</li>
<li<%= sidebar_current("docs-faq") %>>
<a href="/docs/faq.html">FAQ</a>
</li>
</ul>
<% end %>

View File

@ -6,7 +6,7 @@
</li>
<li>
<a href="/docs/install/index.html">Build from Source</a>
<a href="/guides/operations/install/index.html#compiling-from-source">Build from Source</a>
</li>
</ul>
<% end %>

View File

@ -2,71 +2,12 @@
<% content_for :sidebar do %>
<ul class="nav docs-sidenav">
<li<%= sidebar_current("guides-acl") %>>
<a href="/guides/acl.html">ACLs</a>
</li>
<li<%= sidebar_current("guides-autopilot") %>>
<a href="/guides/autopilot.html">Autopilot</a>
</li>
<li<%= sidebar_current("guides-spark") %>>
<a href="/guides/spark/spark.html">Apache Spark Integration</a>
<ul class="nav">
<li<%= sidebar_current("guides-spark-pre") %>>
<a href="/guides/spark/pre.html">Getting Started</a>
</li>
<li<%= sidebar_current("guides-spark-submit") %>>
<a href="/guides/spark/submit.html">Submitting Applications</a>
</li>
<li<%= sidebar_current("guides-spark-customizing") %>>
<a href="/guides/spark/customizing.html">Customizing Applications</a>
</li>
<li<%= sidebar_current("guides-spark-resource") %>>
<a href="/guides/spark/resource.html">Resource Allocation</a>
</li>
<li<%= sidebar_current("guides-spark-dynamic") %>>
<a href="/guides/spark/dynamic.html">Dynamic Executors</a>
</li>
<li<%= sidebar_current("guides-spark-hdfs") %>>
<a href="/guides/spark/hdfs.html">Using HDFS</a>
</li>
<li<%= sidebar_current("guides-spark-monitoring") %>>
<a href="/guides/spark/monitoring.html">Monitoring Output</a>
</li>
<li<%= sidebar_current("guides-spark-configuration") %>>
<a href="/guides/spark/configuration.html">Configuration Properties</a>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-cluster") %>>
<a href="/guides/cluster/bootstrapping.html">Bootstrapping Clusters</a>
<ul class="nav">
<li<%= sidebar_current("guides-cluster-automatic") %>>
<a href="/guides/cluster/automatic.html">Automatic</a>
</li>
<li<%= sidebar_current("guides-cluster-manual") %>>
<a href="/guides/cluster/manual.html">Manual</a>
</li>
<li<%= sidebar_current("guides-cluster-federation") %>>
<a href="/guides/cluster/federation.html">Federation</a>
</li>
<li<%= sidebar_current("guides-cluster-requirements") %>>
<a href="/guides/cluster/requirements.html">Requirements</a>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-decommissioning-nodes") %>>
<a href="/guides/node-draining.html">Decommissioning Nodes</a>
</li>
<li<%= sidebar_current("guides-namespaces") %>>
<a href="/guides/namespaces.html">Namespaces</a>
<li<%= sidebar_current("guides-getting-started") %>>
<a href="/guides/getting-started.html">Getting Started</a>
</li>
<li<%= sidebar_current("guides-operating-a-job") %>>
<a href="/guides/operating-a-job/index.html">Operating a Job</a>
<a href="/guides/operating-a-job/index.html">Job Lifecycle</a>
<ul class="nav">
<li<%= sidebar_current("guides-operating-a-job-configuring-tasks") %>>
<a href="/guides/operating-a-job/configuring-tasks.html">Configuring Tasks</a>
@ -98,39 +39,163 @@
</ul>
</li>
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/index.html">Failure Recovery Strategies</a>
<ul class="nav">
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies-local-restarts") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/restart.html">Local Restarts</a>
</li>
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies-check-restart") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/check-restart.html">Check Restarts</a>
</li>
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies-reschedule") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/reschedule.html">Rescheduling</a>
</li>
</ul>
</li>
<a href="/guides/operating-a-job/failure-handling-strategies/index.html">Failure Recovery Strategies</a>
<ul class="nav">
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies-local-restarts") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/restart.html">Local Restarts</a>
</li>
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies-check-restart") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/check-restart.html">Check Restarts</a>
</li>
<li<%= sidebar_current("guides-operating-a-job-failure-handling-strategies-reschedule") %>>
<a href="/guides/operating-a-job/failure-handling-strategies/reschedule.html">Rescheduling</a>
</li>
</ul>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-outage-recovery") %>>
<a href="/guides/outage.html">Outage Recovery</a>
</li>
<li<%= sidebar_current("guides-quotas") %>>
<a href="/guides/quotas.html">Resource Quotas</a>
</li>
<li<%= sidebar_current("guides-securing-nomad") %>>
<a href="/guides/securing-nomad.html">Securing Nomad</a>
</li>
<li<%= sidebar_current("guides-sentinel") %>>
<a href="/guides/sentinel-policy.html">Sentinel Policies</a>
<li<%= sidebar_current("guides-operations") %>>
<a href="/guides/operations/index.html">Operations</a>
<ul class="nav">
<li<%= sidebar_current("guides-sentinel-job") %>>
<a href="/guides/sentinel/job.html">Job Object</a>
<li<%= sidebar_current("guides-operations-installing") %>>
<a href="/guides/operations/install/index.html">Installing Nomad</a>
</li>
<li<%= sidebar_current("guides-agent") %>>
<a href="/guides/operations/agent/index.html">Running the Agent</a>
</li>
<li<%= sidebar_current("guides-operations-consul-integration") %>>
<a href="/guides/operations/consul-integration/index.html">Consul Integration</a>
</li>
<li<%= sidebar_current("guides-operations-cluster") %>>
<a href="/guides/operations/cluster/bootstrapping.html">Clustering</a>
<ul class="nav">
<li<%= sidebar_current("guides-operations-cluster-manual") %>>
<a href="/guides/operations/cluster/manual.html">Manual Clustering</a>
</li>
<li<%= sidebar_current("guides-operations-cluster-automatic") %>>
<a href="/guides/operations/cluster/automatic.html">Automatic Clustering with Consul</a>
</li>
<li <%= sidebar_current("guides-operations-cluster-cloud-auto-join") %>>
<a href="/guides/operations/cluster/cloud_auto_join.html">Cloud Auto-join</a>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-operations-requirements") %>>
<a href="/guides/operations/requirements.html">Hardware Requirements</a>
</li>
<li<%= sidebar_current("guides-operations-federation") %>>
<a href="/guides/operations/federation.html">Multi-region Federation</a>
</li>
<li<%= sidebar_current("guides-operations-vault-integration") %>>
<a href="/guides/operations/vault-integration/index.html">Vault Integration</a>
</li>
<li<%= sidebar_current("guides-operations-decommissioning-nodes") %>>
<a href="/guides/operations/node-draining.html">Workload Migration</a>
</li>
<li<%= sidebar_current("guides-operations-outage-recovery") %>>
<a href="/guides/operations/outage.html">Outage Recovery</a>
</li>
<li<%= sidebar_current("guides-operations-monitoring") %>>
<a href="/guides/operations/monitoring/telemetry.html">Monitoring</a>
<ul class="nav">
<li<%= sidebar_current("guides-operations-monitoring-telemetry") %>>
<a href="/guides/operations/monitoring/telemetry.html">Telemetry</a>
</li>
<li<%= sidebar_current("guides-operations-monitoring-grafana") %>>
<a href="/guides/operations/monitoring/nomad-metrics.html">Grafana and Prometheus</a>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-operations-upgrade") %>>
<a href="/guides/operations/upgrade/index.html">Upgrading</a>
<ul class="nav">
<li<%= sidebar_current("guides-operations-upgrade-specific") %>>
<a href="/guides/operations/upgrade/upgrade-specific.html">Upgrade Guides</a>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-operations-autopilot") %>>
<a href="/guides/operations/autopilot.html">Autopilot</a>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-security") %>>
<a href="/guides/security/index.html">Security and Governance</a>
<ul class="nav">
<li<%= sidebar_current("guides-security-encryption") %>>
<a href="/guides/security/encryption.html">Encryption Overview</a>
</li>
<li<%= sidebar_current("guides-security-tls") %>>
<a href="/guides/security/securing-nomad.html">Securing Nomad with TLS</a>
</li>
<li<%= sidebar_current("guides-security-acl") %>>
<a href="/guides/security/acl.html">Access Control</a>
</li>
<li<%= sidebar_current("guides-security-namespaces") %>>
<a href="/guides/security/namespaces.html">Namespaces</a>
</li>
<li<%= sidebar_current("guides-security-quotas") %>>
<a href="/guides/security/quotas.html">Resource Quotas</a>
</li>
<li<%= sidebar_current("guides-security-sentinel") %>>
<a href="/guides/security/sentinel-policy.html">Sentinel Policies</a>
<ul class="nav">
<li<%= sidebar_current("guides-security-sentinel-job") %>>
<a href="/guides/security/sentinel/job.html">Job Object</a>
</li>
</ul>
</li>
</ul>
</li>
<li<%= sidebar_current("guides-spark") %>>
<a href="/guides/spark/spark.html">Apache Spark Integration</a>
<ul class="nav">
<li<%= sidebar_current("guides-spark-pre") %>>
<a href="/guides/spark/pre.html">Getting Started</a>
</li>
<li<%= sidebar_current("guides-spark-submit") %>>
<a href="/guides/spark/submit.html">Submitting Applications</a>
</li>
<li<%= sidebar_current("guides-spark-customizing") %>>
<a href="/guides/spark/customizing.html">Customizing Applications</a>
</li>
<li<%= sidebar_current("guides-spark-resource") %>>
<a href="/guides/spark/resource.html">Resource Allocation</a>
</li>
<li<%= sidebar_current("guides-spark-dynamic") %>>
<a href="/guides/spark/dynamic.html">Dynamic Executors</a>
</li>
<li<%= sidebar_current("guides-spark-hdfs") %>>
<a href="/guides/spark/hdfs.html">Using HDFS</a>
</li>
<li<%= sidebar_current("guides-spark-monitoring") %>>
<a href="/guides/spark/monitoring.html">Monitoring Output</a>
</li>
<li<%= sidebar_current("guides-spark-configuration") %>>
<a href="/guides/spark/configuration.html">Configuration Properties</a>
</li>
</ul>
</li>

View File

@ -68,7 +68,7 @@
</li>
<li<%= sidebar_current("getting-started-ui") %>>
<a href="/intro/getting-started/ui.html">Nomad UI</a>
<a href="/intro/getting-started/ui.html">Web UI</a>
</li>
<li<%= sidebar_current("getting-started-nextsteps") %>>

View File

@ -80,7 +80,7 @@
<li><a href="/docs/index.html">Docs</a></li>
<li><a href="/api/index.html">API</a></li>
<li><a href="/resources.html">Resources</a></li>
<li><a href="https://www.hashicorp.com/products/nomad/?utm_source=oss&utm_medium=header-nav&utm_campaign=nomad">Enterprise</a></li>
<li><a href="/docs/enterprise/index.html">Enterprise</a></li>
<li><a href="https://demo.nomadproject.io">UI Demo</a></li>
<li>
<a href="/downloads.html">
@ -114,7 +114,7 @@
<li><a href="/docs/index.html">Docs</a></li>
<li><a href="/api/index.html">API</a></li>
<li><a href="/resources.html">Resources</a></li>
<li><a href="https://www.hashicorp.com/products/nomad/?utm_source=oss&utm_medium=header-nav&utm_campaign=nomad">Enterprise</a></li>
<li><a href="/docs/enterprise/index.html">Enterprise</a></li>
<li><a href="https://demo.nomadproject.io">UI Demo</a></li>
<li><a href="https://www.hashicorp.com/privacy">Privacy</a></li>
<li><a href="/security.html">Security</a></li>