renamed stanza to block for consistency with other projects (#15941)

parent 24b85bf02b
commit 14b53df3b6

@@ -170,7 +170,7 @@ event "fossa-scan" {
}

## These are promotion and post-publish events
-## they should be added to the end of the file after the verify event stanza.
+## they should be added to the end of the file after the verify event block.

event "trigger-staging" {
// This event is dispatched by the bob trigger-promotion command
// and is required - do not delete.

@@ -8,7 +8,7 @@ import (
)

const (
-// The following levels are the only valid values for the `policy = "read"` stanza.
+// The following levels are the only valid values for the `policy = "read"` block.
// When policies are merged together, the most privilege is granted, except for deny
// which always takes precedence and supersedes.
PolicyDeny = "deny"

@@ -20,7 +20,7 @@ const (

const (
// The following are the fine-grained capabilities that can be granted within a namespace.
-// The Policy stanza is a short hand for granting several of these. When capabilities are
+// The Policy block is a short hand for granting several of these. When capabilities are
// combined we take the union of all capabilities. If the deny capability is present, it
// takes precedence and overwrites all other capabilities.

@@ -54,7 +54,7 @@ var (

const (
// The following are the fine-grained capabilities that can be granted for a volume set.
-// The Policy stanza is a short hand for granting several of these. When capabilities are
+// The Policy block is a short hand for granting several of these. When capabilities are
// combined we take the union of all capabilities. If the deny capability is present, it
// takes precedence and overwrites all other capabilities.

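The coarse `policy` disposition and the fine-grained capabilities described in these comments are both set inside a namespace block of an ACL policy. A minimal sketch, with an assumed namespace name and capability list:

    namespace "default" {
      policy       = "read"
      capabilities = ["submit-job"]
    }
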
@@ -40,7 +40,7 @@ func (c *Consul) MergeNamespace(namespace *string) {
}
}

-// ConsulConnect represents a Consul Connect jobspec stanza.
+// ConsulConnect represents a Consul Connect jobspec block.
type ConsulConnect struct {
Native bool `hcl:"native,optional"`
Gateway *ConsulGateway `hcl:"gateway,block"`

@@ -59,7 +59,7 @@ func (cc *ConsulConnect) Canonicalize() {
}

// ConsulSidecarService represents a Consul Connect SidecarService jobspec
-// stanza.
+// block.
type ConsulSidecarService struct {
Tags []string `hcl:"tags,optional"`
Port string `hcl:"port,optional"`

@@ -133,7 +133,7 @@ func (st *SidecarTask) Canonicalize() {
}
}

-// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza.
+// ConsulProxy represents a Consul Connect sidecar proxy jobspec block.
type ConsulProxy struct {
LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"`
LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"`

@@ -197,7 +197,7 @@ func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway {
}
}

-// ConsulUpstream represents a Consul Connect upstream jobspec stanza.
+// ConsulUpstream represents a Consul Connect upstream jobspec block.
type ConsulUpstream struct {
DestinationName string `mapstructure:"destination_name" hcl:"destination_name,optional"`
DestinationNamespace string `mapstructure:"destination_namespace" hcl:"destination_namespace,optional"`

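These API structs map onto the `connect` block of a service in the job specification. A minimal sketch of a sidecar with one upstream (service names and ports are illustrative):

    service {
      name = "count-dashboard"
      port = "9002"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "count-api"
              local_bind_port  = 8080
            }
          }
        }
      }
    }
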
@@ -162,7 +162,7 @@ type SchedulerSetConfigurationResponse struct {
}

// SchedulerAlgorithm is an enum string that encapsulates the valid options for a
-// SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the
+// SchedulerConfiguration block's SchedulerAlgorithm. These modes will allow the
// scheduler to be user-selectable.
type SchedulerAlgorithm string

@@ -1052,7 +1052,7 @@ type TaskEvent struct {
}

// CSIPluginType is an enum string that encapsulates the valid options for a
-// CSIPlugin stanza's Type. These modes will allow the plugin to be used in
+// CSIPlugin block's Type. These modes will allow the plugin to be used in
// different ways by the client.
type CSIPluginType string

@@ -565,7 +565,7 @@ func TestTaskGroup_Merge_Update(t *testing.T) {
}
job.Canonicalize()

-// Merge and canonicalize part of an update stanza
+// Merge and canonicalize part of an update block
tg := &TaskGroup{
Name: pointerOf("foo"),
Update: &UpdateStrategy{

@@ -743,7 +743,7 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
}
}

-// TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
+// TestSpread_Canonicalize asserts that the spread block is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) {
testutil.Parallel(t)

@@ -39,7 +39,7 @@ var (
// consulGRPCSocketHook creates Unix sockets to allow communication from inside a
// netns to Consul gRPC endpoint.
//
-// Noop for allocations without a group Connect stanza using bridge networking.
+// Noop for allocations without a group Connect block using bridge networking.
type consulGRPCSocketHook struct {
logger hclog.Logger

@@ -171,7 +171,7 @@ func netModeToIsolationMode(netMode string) drivers.NetIsolationMode {
func newNetworkConfigurator(log hclog.Logger, alloc *structs.Allocation, config *clientconfig.Config) (NetworkConfigurator, error) {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

-// Check if network stanza is given
+// Check if network block is given
if len(tg.Networks) == 0 {
return &hostNetworkConfigurator{}, nil
}

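The group-level `network` block this function checks for looks like the following in a job specification; a minimal sketch with an illustrative port label:

    group "web" {
      network {
        mode = "bridge"

        port "http" {
          to = 8080
        }
      }
    }
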
@@ -265,7 +265,7 @@ func (h *connectNativeHook) hostEnv(env map[string]string) map[string]string {
func (h *connectNativeHook) maybeSetSITokenEnv(dir, task string, env map[string]string) error {
if _, exists := env["CONSUL_HTTP_TOKEN"]; exists {
// Consul token was already set - typically by using the Vault integration
-// and a template stanza to set the environment. Ignore the SI token as
+// and a template block to set the environment. Ignore the SI token as
// the configured token takes precedence.
return nil
}

@@ -114,7 +114,7 @@ func TestConnectNativeHook_tlsEnv(t *testing.T) {
},
}

-// existing config from task env stanza
+// existing config from task env block
taskEnv := map[string]string{
"CONSUL_CACERT": "fakeCA.pem",
"CONSUL_CLIENT_CERT": "fakeCert.pem",

@@ -490,7 +490,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
request := &interfaces.TaskPrestartRequest{
Task: tg.Tasks[0],
TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name),
-TaskEnv: taskenv.NewEmptyTaskEnv(), // nothing set in env stanza
+TaskEnv: taskenv.NewEmptyTaskEnv(), // nothing set in env block
}
require.NoError(t, request.TaskDir.Build(false, nil))

@@ -620,7 +620,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
request := &interfaces.TaskPrestartRequest{
Task: tg.Tasks[0],
TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name),
-TaskEnv: taskEnv, // env stanza is configured w/ non-default tls configs
+TaskEnv: taskEnv, // env block is configured w/ non-default tls configs
}
require.NoError(t, request.TaskDir.Build(false, nil))

@@ -634,7 +634,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
require.True(t, response.Done)

// Assert environment variable for CONSUL_HTTP_SSL is set, because it was
-// the only one not overridden by task env stanza config
+// the only one not overridden by task env block config
require.NotEmpty(t, response.Env)
require.Equal(t, map[string]string{
"CONSUL_HTTP_SSL": "true",

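The task-level `env` block these tests exercise is plain HCL; a minimal sketch with illustrative certificate paths:

    task "web" {
      env {
        CONSUL_CACERT      = "/etc/consul/fakeCA.pem"
        CONSUL_CLIENT_CERT = "/etc/consul/fakeCert.pem"
      }
    }
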
@@ -70,7 +70,7 @@ func (tr *TaskRunner) initHooks() {
newDeviceHook(tr.devicemanager, hookLogger),
}

-// If the task has a CSI stanza, add the hook.
+// If the task has a CSI block, add the hook.
if task.CSIPluginConfig != nil {
tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook(
&csiPluginSupervisorHookConfig{

@@ -86,7 +86,7 @@ func (tr *TaskRunner) initHooks() {
// If Vault is enabled, add the hook
if task.Vault != nil {
tr.runnerHooks = append(tr.runnerHooks, newVaultHook(&vaultHookConfig{
-vaultStanza: task.Vault,
+vaultBlock: task.Vault,
client: tr.vaultClient,
events: tr,
lifecycle: tr,

@@ -45,7 +45,7 @@ func (tr *TaskRunner) updatedVaultToken(token string) {
}

type vaultHookConfig struct {
-vaultStanza *structs.Vault
+vaultBlock *structs.Vault
client vaultclient.VaultClient
events ti.EventEmitter
lifecycle ti.TaskLifecycle

@@ -56,8 +56,8 @@ type vaultHookConfig struct {
}

type vaultHook struct {
-// vaultStanza is the vault stanza for the task
-vaultStanza *structs.Vault
+// vaultBlock is the vault block for the task
+vaultBlock *structs.Vault

// eventEmitter is used to emit events to the task
eventEmitter ti.EventEmitter

@@ -97,7 +97,7 @@ type vaultHook struct {
func newVaultHook(config *vaultHookConfig) *vaultHook {
ctx, cancel := context.WithCancel(context.Background())
h := &vaultHook{
-vaultStanza: config.vaultStanza,
+vaultBlock: config.vaultBlock,
client: config.client,
eventEmitter: config.events,
lifecycle: config.lifecycle,

@@ -239,9 +239,9 @@ OUTER:
h.future.Set(token)

if updatedToken {
-switch h.vaultStanza.ChangeMode {
+switch h.vaultBlock.ChangeMode {
case structs.VaultChangeModeSignal:
-s, err := signals.Parse(h.vaultStanza.ChangeSignal)
+s, err := signals.Parse(h.vaultBlock.ChangeSignal)
if err != nil {
h.logger.Error("failed to parse signal", "error", err)
h.lifecycle.Kill(h.ctx,

@@ -252,7 +252,7 @@ OUTER:
}

event := structs.NewTaskEvent(structs.TaskSignaling).SetTaskSignal(s).SetDisplayMessage("Vault: new Vault token acquired")
-if err := h.lifecycle.Signal(event, h.vaultStanza.ChangeSignal); err != nil {
+if err := h.lifecycle.Signal(event, h.vaultBlock.ChangeSignal); err != nil {
h.logger.Error("failed to send signal", "error", err)
h.lifecycle.Kill(h.ctx,
structs.NewTaskEvent(structs.TaskKilling).

@@ -268,7 +268,7 @@ OUTER:
case structs.VaultChangeModeNoop:
fallthrough
default:
-h.logger.Error("invalid Vault change mode", "mode", h.vaultStanza.ChangeMode)
+h.logger.Error("invalid Vault change mode", "mode", h.vaultBlock.ChangeMode)
}

// We have handled it

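The change mode and signal this hook dispatches on come from the task's `vault` block in the job specification; a minimal sketch with an assumed policy name:

    task "server" {
      vault {
        policies      = ["app-secrets"]
        change_mode   = "signal"
        change_signal = "SIGHUP"
      }
    }
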
@@ -18,7 +18,7 @@ type ServiceRegistrationHandler struct {
cfg *ServiceRegistrationHandlerCfg

// checkWatcher watches checks of services in the Nomad service provider,
-// and restarts associated tasks in accordance with their check_restart stanza.
+// and restarts associated tasks in accordance with their check_restart block.
checkWatcher serviceregistration.CheckWatcher

// registrationEnabled tracks whether this handler is enabled for

@@ -57,7 +57,7 @@ type ServiceRegistrationHandlerCfg struct {
RPCFn func(method string, args, resp interface{}) error

// CheckWatcher watches checks of services in the Nomad service provider,
-// and restarts associated tasks in accordance with their check_restart stanza.
+// and restarts associated tasks in accordance with their check_restart block.
CheckWatcher serviceregistration.CheckWatcher
}

@@ -20,13 +20,13 @@ type WorkloadServices struct {
ProviderNamespace string

// Restarter allows restarting the task or task group depending on the
-// check_restart stanzas.
+// check_restart blocks.
Restarter WorkloadRestarter

// Services and checks to register for the task.
Services []*structs.Service

-// Networks from the task's resources stanza.
+// Networks from the task's resources block.
// TODO: remove and use Ports
Networks structs.Networks

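The `check_restart` block these watchers act on is nested inside a service check; a minimal sketch (path and thresholds are illustrative):

    service {
      name = "web"
      port = "http"

      check {
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"

        check_restart {
          limit = 3
          grace = "30s"
        }
      }
    }
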
@@ -408,7 +408,7 @@ func (c *Command) IsValidConfig(config, cmdConfig *Config) bool {
}

if err := config.Client.Artifact.Validate(); err != nil {
-c.Ui.Error(fmt.Sprintf("client.artifact stanza invalid: %v", err))
+c.Ui.Error(fmt.Sprintf("client.artifact block invalid: %v", err))
return false
}

@@ -1191,7 +1191,7 @@ func (c *Command) startupJoin(config *Config) error {
new = len(config.Server.ServerJoin.StartJoin)
}
if old != 0 && new != 0 {
-return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join stanza")
+return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join block")
}

// Nothing to do

@@ -398,7 +398,7 @@ func TestIsValidConfig(t *testing.T) {
},
},
},
-err: "client.artifact stanza invalid: http_read_timeout must be > 0",
+err: "client.artifact block invalid: http_read_timeout must be > 0",
},
}

@@ -19,7 +19,7 @@ import (
func newConnect(serviceID string, info structs.AllocInfo, serviceName string, nc *structs.ConsulConnect, networks structs.Networks, ports structs.AllocatedPorts) (*api.AgentServiceConnect, error) {
switch {
case nc == nil:
-// no connect stanza means there is no connect service to register
+// no connect block means there is no connect service to register
return nil, nil

case nc.IsGateway():

@@ -57,7 +57,7 @@ func newConnectGateway(connect *structs.ConsulConnect) *api.AgentServiceConnectP

var envoyConfig map[string]interface{}

-// Populate the envoy configuration from the gateway.proxy stanza, if
+// Populate the envoy configuration from the gateway.proxy block, if
// such configuration is provided.
if proxy := connect.Gateway.Proxy; proxy != nil {
envoyConfig = make(map[string]interface{})

@@ -94,7 +94,7 @@ func newConnectGateway(connect *structs.ConsulConnect) *api.AgentServiceConnectP

func connectSidecarRegistration(serviceID string, info structs.AllocInfo, css *structs.ConsulSidecarService, networks structs.Networks, ports structs.AllocatedPorts) (*api.AgentServiceRegistration, error) {
if css == nil {
-// no sidecar stanza means there is no sidecar service to register
+// no sidecar block means there is no sidecar service to register
return nil, nil
}

@@ -87,7 +87,7 @@ const (

// Additional Consul ACLs required
// - Consul Template: key:read
-// Used in tasks with template stanza that use Consul keys.
+// Used in tasks with template block that use Consul keys.

// CatalogAPI is the consul/api.Catalog API used by Nomad.
//

@@ -1120,8 +1120,8 @@ func (c *ServiceClient) serviceRegs(
Port: port,
Meta: meta,
TaggedAddresses: taggedAddresses,
-Connect: connect, // will be nil if no Connect stanza
-Proxy: gateway, // will be nil if no Connect Gateway stanza
+Connect: connect, // will be nil if no Connect block
+Proxy: gateway, // will be nil if no Connect Gateway block
Checks: make([]*api.AgentServiceCheck, 0, len(service.Checks)),
}
ops.regServices = append(ops.regServices, serviceReg)

@@ -404,7 +404,7 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
if args.Job.Type != nil && *args.Job.Type == api.JobTypeSystem {
for _, tg := range args.Job.TaskGroups {
if tg.Scaling != nil {
-return nil, CodedError(400, "Task groups with job type system do not support scaling stanzas")
+return nil, CodedError(400, "Task groups with job type system do not support scaling blocks")
}
}
}

@@ -661,7 +661,7 @@ func TestHTTP_jobUpdate_systemScaling(t *testing.T) {
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
assert.Nil(t, obj)
-assert.Equal(t, CodedError(400, "Task groups with job type system do not support scaling stanzas"), err)
+assert.Equal(t, CodedError(400, "Task groups with job type system do not support scaling blocks"), err)
})
}

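The `scaling` block rejected here for system jobs is declared at the group level; a minimal sketch (bounds are illustrative):

    group "cache" {
      count = 1

      scaling {
        enabled = true
        min     = 1
        max     = 10
      }
    }
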
@@ -3517,7 +3517,7 @@ func TestJobs_Matching_Resources(t *testing.T) {
}

// TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate
-// stanza fails to validate but does not panic (see #5477).
+// block fails to validate but does not panic (see #5477).
func TestHTTP_JobValidate_SystemMigrate(t *testing.T) {
ci.Parallel(t)
httpTest(t, nil, func(s *TestAgent) {

@@ -3534,7 +3534,7 @@ func TestHTTP_JobValidate_SystemMigrate(t *testing.T) {
// System job...
Type: pointer.Of("system"),

-// ...with an empty migrate stanza
+// ...with an empty migrate block
Migrate: &api.MigrateStrategy{},
}

@@ -2,11 +2,10 @@ package agent

import (
"fmt"
-golog "log"
"strings"
"time"

+golog "log"

log "github.com/hashicorp/go-hclog"
)

@@ -56,25 +55,25 @@ type retryJoiner struct {
}

// Validate ensures that the configuration passes validity checks for the
-// retry_join stanza. If the configuration is not valid, returns an error that
+// retry_join block. If the configuration is not valid, returns an error that
// will be displayed to the operator, otherwise nil.
func (r *retryJoiner) Validate(config *Config) error {

// If retry_join is defined for the server, ensure that deprecated
-// fields and the server_join stanza are not both set
+// fields and the server_join block are not both set
if config.Server != nil && config.Server.ServerJoin != nil && len(config.Server.ServerJoin.RetryJoin) != 0 {
if len(config.Server.RetryJoin) != 0 {
-return fmt.Errorf("server_join and retry_join cannot both be defined; prefer setting the server_join stanza")
+return fmt.Errorf("server_join and retry_join cannot both be defined; prefer setting the server_join block")
}
if len(config.Server.StartJoin) != 0 {
-return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join stanza")
+return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join block")
}
if config.Server.RetryMaxAttempts != 0 {
-return fmt.Errorf("server_join and retry_max cannot both be defined; prefer setting the server_join stanza")
+return fmt.Errorf("server_join and retry_max cannot both be defined; prefer setting the server_join block")
}

if config.Server.RetryInterval != 0 {
-return fmt.Errorf("server_join and retry_interval cannot both be defined; prefer setting the server_join stanza")
+return fmt.Errorf("server_join and retry_interval cannot both be defined; prefer setting the server_join block")
}

if len(config.Server.ServerJoin.StartJoin) != 0 {

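The preferred `server_join` block that all four of these errors point operators toward lives under the agent's `server` block; a minimal sketch (addresses and retry settings are illustrative):

    server {
      enabled = true

      server_join {
        retry_join     = ["10.0.0.10", "10.0.0.11"]
        retry_max      = 3
        retry_interval = "15s"
      }
    }
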
@@ -222,7 +222,7 @@ func TestRetryJoin_Validate(t *testing.T) {
},
},
isValid: false,
-reason: "server_join cannot be defined if retry_join is defined on the server stanza",
+reason: "server_join cannot be defined if retry_join is defined on the server block",
},
{
config: &Config{

@@ -240,7 +240,7 @@ func TestRetryJoin_Validate(t *testing.T) {
},
},
isValid: false,
-reason: "server_join cannot be defined if start_join is defined on the server stanza",
+reason: "server_join cannot be defined if start_join is defined on the server block",
},
{
config: &Config{

@@ -258,7 +258,7 @@ func TestRetryJoin_Validate(t *testing.T) {
},
},
isValid: false,
-reason: "server_join cannot be defined if retry_max_attempts is defined on the server stanza",
+reason: "server_join cannot be defined if retry_max_attempts is defined on the server block",
},
{
config: &Config{

@@ -276,7 +276,7 @@ func TestRetryJoin_Validate(t *testing.T) {
},
},
isValid: false,
-reason: "server_join cannot be defined if retry_interval is defined on the server stanza",
+reason: "server_join cannot be defined if retry_interval is defined on the server block",
},
{
config: &Config{

@@ -884,11 +884,11 @@ FOUND:
if len(hostVolumesOutput) > 1 {
c.Ui.Output("Host Volumes:")
c.Ui.Output(formatList(hostVolumesOutput))
-c.Ui.Output("") // line padding to next stanza
+c.Ui.Output("") // line padding to next block
}
if len(csiVolumesOutput) > 1 {
c.Ui.Output("CSI Volumes:")
c.Ui.Output(formatList(csiVolumesOutput))
-c.Ui.Output("") // line padding to next stanza
+c.Ui.Output("") // line padding to next block
}
}

@@ -1,12 +1,12 @@
# There can only be a single job definition per file. This job is named
# "countdash" so it will create a job with the ID and Name "countdash".

-# The "job" stanza is the top-most configuration option in the job
+# The "job" block is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
-# For more information and examples on the "job" stanza, please see
+# For more information and examples on the "job" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job.html

@@ -31,11 +31,11 @@ job "countdash" {
#
type = "service"

-# The "constraint" stanza defines additional constraints for placing this job,
-# in addition to any resource or driver constraints. This stanza may be placed
+# The "constraint" block defines additional constraints for placing this job,
+# in addition to any resource or driver constraints. This block may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
-# For more information and examples on the "constraint" stanza, please see
+# For more information and examples on the "constraint" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint.html

@@ -45,14 +45,14 @@ job "countdash" {
# value = "linux"
# }

-# The "update" stanza specifies the update strategy of task groups. The update
+# The "update" block specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
-# "update" stanza may be placed at the job or task group. When placed at the
+# "update" block may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
-# group level, the stanzas are merged with the group's taking precedence.
+# group level, the blocks are merged with the group's taking precedence.
#
-# For more information and examples on the "update" stanza, please see
+# For more information and examples on the "update" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update.html

@@ -99,10 +99,10 @@ job "countdash" {
# version is deployed and upon promotion the old version is stopped.
canary = 0
}
-# The migrate stanza specifies the group's strategy for migrating off of
+# The migrate block specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
-# For more information on the "migrate" stanza, please see
+# For more information on the "migrate" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate.html

@@ -127,11 +127,11 @@ job "countdash" {
# is specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
-# The "group" stanza defines a series of tasks that should be co-located on
+# The "group" block defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
-# For more information and examples on the "group" stanza, please see
+# For more information and examples on the "group" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group.html

@@ -142,10 +142,10 @@ job "countdash" {
# to 1.
count = 1

-# The "restart" stanza configures a group's behavior on task failure. If
+# The "restart" block configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
-# For more information and examples on the "restart" stanza, please see
+# For more information and examples on the "restart" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart.html

@@ -166,12 +166,12 @@ job "countdash" {
mode = "fail"
}

-# The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
-# instead of a hard disk requirement. Clients using this stanza should
-# not specify disk requirements in the resources stanza of the task. All
+# The "ephemeral_disk" block instructs Nomad to utilize an ephemeral disk
+# instead of a hard disk requirement. Clients using this block should
+# not specify disk requirements in the resources block of the task. All
# tasks in this group will share the same ephemeral disk.
#
-# For more information and examples on the "ephemeral_disk" stanza, please
+# For more information and examples on the "ephemeral_disk" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html

@@ -192,10 +192,10 @@ job "countdash" {
size = 300
}

-# The "affinity" stanza enables operators to express placement preferences
+# The "affinity" block enables operators to express placement preferences
# based on node attributes or metadata.
#
-# For more information and examples on the "affinity" stanza, please
+# For more information and examples on the "affinity" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity.html

@@ -214,11 +214,11 @@ job "countdash" {
# }


-# The "spread" stanza allows operators to increase the failure tolerance of
+# The "spread" block allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
-# For more information and examples on the "spread" stanza, please
+# For more information and examples on the "spread" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread.html

@@ -239,7 +239,7 @@ job "countdash" {
# }
# }

-# The "network" stanza for a group creates a network namespace shared
+# The "network" block for a group creates a network namespace shared
# by all tasks within the group.
network {
# "mode" is the CNI plugin used to configure the network namespace.

@@ -255,23 +255,23 @@ job "countdash" {
# to = "8080"
# }

-# The "dns" stanza allows operators to override the DNS configuration
+# The "dns" block allows operators to override the DNS configuration
# inherited by the host client.
# dns {
# servers = ["1.1.1.1"]
# }
}
-# The "service" stanza enables Consul Connect.
+# The "service" block enables Consul Connect.
service {
name = "count-api"

-# The port in the service stanza is the port the service listens on.
+# The port in the service block is the port the service listens on.
# The Envoy proxy will automatically route traffic to that port
# inside the network namespace. If the application binds to localhost
# on this port, the task needs no additional network configuration.
port = "9001"

-# The "check" stanza specifies a health check associated with the service.
+# The "check" block specifies a health check associated with the service.
# This can be specified multiple times to define multiple checks for the
# service. Note that checks run inside the task indicated by the "task"
# field.

@@ -285,7 +285,7 @@ job "countdash" {
# }

connect {
-# The "sidecar_service" stanza configures the Envoy sidecar admission
+# The "sidecar_service" block configures the Envoy sidecar admission
# controller. For each task group with a sidecar_service, Nomad will
# inject an Envoy task into the task group. A group network will be
# required and a dynamic port will be registered for remote services

@@ -295,10 +295,10 @@ job "countdash" {
sidecar_service {}
}
}
-# The "task" stanza creates an individual unit of work, such as a Docker
+# The "task" block creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
-# For more information and examples on the "task" stanza, please see
+# For more information and examples on the "task" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task.html

@@ -308,7 +308,7 @@ job "countdash" {
# run the task.
driver = "docker"

-# The "config" stanza specifies the driver configuration, which is passed
+# The "config" block specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.

@@ -321,13 +321,13 @@ job "countdash" {
auth_soft_fail = true
}

-# The "artifact" stanza instructs Nomad to download an artifact from a
+# The "artifact" block instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
-# task. It is possible to specify the "artifact" stanza multiple times to
+# task. It is possible to specify the "artifact" block multiple times to
# download multiple artifacts.
#
-# For more information and examples on the "artifact" stanza, please see
+# For more information and examples on the "artifact" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact.html

@@ -340,12 +340,12 @@ job "countdash" {
# }


-# The "logs" stanza instructs the Nomad client on how many log files and
+# The "logs" block instructs the Nomad client on how many log files and
# the maximum size of those logs files to retain. Logging is enabled by
-# default, but the "logs" stanza allows for finer-grained control over
+# default, but the "logs" block allows for finer-grained control over
# the log rotation and storage configuration.
#
-# For more information and examples on the "logs" stanza, please see
+# For more information and examples on the "logs" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs.html

@@ -355,12 +355,12 @@ job "countdash" {
# max_file_size = 15
# }

-# The "resources" stanza describes the requirements a task needs to
+# The "resources" block describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
-# For more information and examples on the "resources" stanza, please see
+# For more information and examples on the "resources" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources.html

@@ -372,13 +372,13 @@ job "countdash" {
}

# The Envoy sidecar admission controller will inject an Envoy task into
-# any task group for each service with a sidecar_service stanza it contains.
+# any task group for each service with a sidecar_service block it contains.
# A group network will be required and a dynamic port will be registered for
# remote services to connect to Envoy with the name `connect-proxy-<service>`.
# By default, Envoy will be run via its official upstream Docker image.
#
# There are two ways to modify the default behavior:
-# * Tasks can define a `sidecar_task` stanza in the `connect` stanza
+# * Tasks can define a `sidecar_task` block in the `connect` block
# that merges into the default sidecar configuration.
# * Add the `kind = "connect-proxy:<service>"` field to another task.
# That task will be replace the default Envoy proxy task entirely.

@@ -406,7 +406,7 @@ job "countdash" {
# }
# }
}
-# This job has a second "group" stanza to define tasks that might be placed
+# This job has a second "group" block to define tasks that might be placed
# on a separate Nomad client from the group above.
#
group "dashboard" {

@@ -429,7 +429,7 @@ job "countdash" {
connect {
sidecar_service {
proxy {
-# The upstreams stanza defines the remote service to access
+# The upstreams block defines the remote service to access
# (count-api) and what port to expose that service on inside
# the network namespace. This allows this task to reach the
# upstream at localhost:8080.

@@ -440,7 +440,7 @@ job "countdash" {
}
}

-# The `sidecar_task` stanza modifies the default configuration
+# The `sidecar_task` block modifies the default configuration
# of the Envoy proxy task.
# sidecar_task {
# resources {

@@ -1,12 +1,12 @@
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

-# The "job" stanza is the top-most configuration option in the job
+# The "job" block is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
-# For more information and examples on the "job" stanza, please see
+# For more information and examples on the "job" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job

@@ -31,11 +31,11 @@ job "example" {
#
type = "service"

-# The "constraint" stanza defines additional constraints for placing this job,
-# in addition to any resource or driver constraints. This stanza may be placed
+# The "constraint" block defines additional constraints for placing this job,
+# in addition to any resource or driver constraints. This block may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
-# For more information and examples on the "constraint" stanza, please see
+# For more information and examples on the "constraint" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint

@@ -45,14 +45,14 @@ job "example" {
# value = "linux"
# }

-# The "update" stanza specifies the update strategy of task groups. The update
+# The "update" block specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
-# "update" stanza may be placed at the job or task group. When placed at the
+# "update" block may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
-# group level, the stanzas are merged with the group's taking precedence.
+# group level, the blocks are merged with the group's taking precedence.
#
-# For more information and examples on the "update" stanza, please see
+# For more information and examples on the "update" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update

@@ -99,10 +99,10 @@ job "example" {
# version is deployed and upon promotion the old version is stopped.
canary = 0
}
-# The migrate stanza specifies the group's strategy for migrating off of
+# The migrate block specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
-# For more information on the "migrate" stanza, please see
+# For more information on the "migrate" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate

@@ -127,11 +127,11 @@ job "example" {
# is specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
-# The "group" stanza defines a series of tasks that should be co-located on
+# The "group" block defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
-# For more information and examples on the "group" stanza, please see
+# For more information and examples on the "group" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group

@@ -142,10 +142,10 @@ job "example" {
# to 1.
count = 1

-# The "network" stanza specifies the network configuration for the allocation
+# The "network" block specifies the network configuration for the allocation
# including requesting port bindings.
#
-# For more information and examples on the "network" stanza, please see
+# For more information and examples on the "network" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/network

@@ -156,12 +156,12 @@ job "example" {
}
}

-# The "service" stanza instructs Nomad to register this task as a service
+# The "service" block instructs Nomad to register this task as a service
# in the service discovery engine, which is currently Nomad or Consul. This
# will make the service discoverable after Nomad has placed it on a host and
# port.
#
-# For more information and examples on the "service" stanza, please see
+# For more information and examples on the "service" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/service

@@ -172,10 +172,10 @@ job "example" {
port = "db"
provider = "nomad"

-# The "check" stanza instructs Nomad to create a Consul health check for
+# The "check" block instructs Nomad to create a Consul health check for
# this service. A sample check is provided here for your convenience;
-# uncomment it to enable it. The "check" stanza is documented in the
-# "service" stanza documentation.
+# uncomment it to enable it. The "check" block is documented in the
+# "service" block documentation.

# check {
# name = "alive"

@@ -186,10 +186,10 @@ job "example" {

}

-# The "restart" stanza configures a group's behavior on task failure. If
+# The "restart" block configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
-# For more information and examples on the "restart" stanza, please see
+# For more information and examples on the "restart" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart

@@ -210,12 +210,12 @@ job "example" {
mode = "fail"
}

-# The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
-# instead of a hard disk requirement. Clients using this stanza should
-# not specify disk requirements in the resources stanza of the task. All
+# The "ephemeral_disk" block instructs Nomad to utilize an ephemeral disk
+# instead of a hard disk requirement. Clients using this block should
+# not specify disk requirements in the resources block of the task. All
# tasks in this group will share the same ephemeral disk.
#
-# For more information and examples on the "ephemeral_disk" stanza, please
+# For more information and examples on the "ephemeral_disk" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk

@@ -236,10 +236,10 @@ job "example" {
size = 300
}

-# The "affinity" stanza enables operators to express placement preferences
+# The "affinity" block enables operators to express placement preferences
# based on node attributes or metadata.
#
-# For more information and examples on the "affinity" stanza, please
+# For more information and examples on the "affinity" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity

@@ -260,11 +260,11 @@ job "example" {
# }


-# The "spread" stanza allows operators to increase the failure tolerance of
+# The "spread" block allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
-# For more information and examples on the "spread" stanza, please
+# For more information and examples on the "spread" block, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread

@@ -285,10 +285,10 @@ job "example" {
# }
# }

-# The "task" stanza creates an individual unit of work, such as a Docker
+# The "task" block creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
-# For more information and examples on the "task" stanza, please see
+# For more information and examples on the "task" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task

@@ -298,7 +298,7 @@ job "example" {
# run the task.
driver = "docker"

-# The "config" stanza specifies the driver configuration, which is passed
+# The "config" block specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.

@@ -312,13 +312,13 @@ job "example" {
auth_soft_fail = true
}

-# The "artifact" stanza instructs Nomad to download an artifact from a
+# The "artifact" block instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
-# task. It is possible to specify the "artifact" stanza multiple times to
+# task. It is possible to specify the "artifact" block multiple times to
# download multiple artifacts.
#
-# For more information and examples on the "artifact" stanza, please see
+# For more information and examples on the "artifact" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact

@@ -331,12 +331,12 @@ job "example" {
# }


-# The "logs" stanza instructs the Nomad client on how many log files and
+# The "logs" block instructs the Nomad client on how many log files and
# the maximum size of those logs files to retain. Logging is enabled by
-# default, but the "logs" stanza allows for finer-grained control over
+# default, but the "logs" block allows for finer-grained control over
# the log rotation and storage configuration.
#
-# For more information and examples on the "logs" stanza, please see
+# For more information and examples on the "logs" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs

@@ -346,12 +346,12 @@ job "example" {
# max_file_size = 15
# }

-# The "resources" stanza describes the requirements a task needs to
+# The "resources" block describes the requirements a task needs to
# execute. Resource requirements include memory, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
-# For more information and examples on the "resources" stanza, please see
+# For more information and examples on the "resources" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources

@@ -362,11 +362,11 @@ job "example" {
}


-# The "template" stanza instructs Nomad to manage a template, such as
+# The "template" block instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
#
-# For more information and examples on the "template" stanza, please see
+# For more information and examples on the "template" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/template

@@ -378,7 +378,7 @@ job "example" {
# change_signal = "SIGHUP"
# }

-# The "template" stanza can also be used to create environment variables
+# The "template" block can also be used to create environment variables
# for tasks that prefer those to config files. The task will be restarted
# when data pulled from Consul or Vault changes.
#

@@ -388,14 +388,14 @@ job "example" {
# env = true
# }

-# The "vault" stanza instructs the Nomad client to acquire a token from
+# The "vault" block instructs the Nomad client to acquire a token from
# a HashiCorp Vault server. The Nomad servers must be configured and
# authorized to communicate with Vault. By default, Nomad will inject
# The token into the job via an environment variable and make the token
-# available to the "template" stanza. The Nomad client handles the renewal
+# available to the "template" block. The Nomad client handles the renewal
# and revocation of the Vault token.
#
-# For more information and examples on the "vault" stanza, please see
+# For more information and examples on the "vault" block, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/vault

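The `template` and `vault` blocks described in these comments combine like this in practice; a minimal task-level sketch (Consul key, destination path, and policy name are illustrative):

    task "server" {
      vault {
        policies = ["app-secrets"]
      }

      template {
        data        = "APP_KEY={{ key \"app/config/key\" }}"
        destination = "local/app.env"
        env         = true
      }
    }
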
@@ -29,7 +29,7 @@ Usage: nomad deployment <subcommand> [options] [args]
$ nomad deployment promote <deployment-id>

Mark a deployment as failed. This will stop new allocations from being placed
-and if the job's upgrade stanza specifies auto_revert, causes the job to
+and if the job's upgrade block specifies auto_revert, causes the job to
revert back to the last stable version of the job:

$ nomad deployment fail <deployment-id>

@@ -100,13 +100,13 @@ Plan Options:
 -vault-token
Used to validate if the user submitting the job has permission to run the job
according to its Vault policies. A Vault token must be supplied if the vault
-stanza allow_unauthenticated is disabled in the Nomad server configuration.
+block allow_unauthenticated is disabled in the Nomad server configuration.
If the -vault-token flag is set, the passed Vault token is added to the jobspec
before sending to the Nomad servers. This allows passing the Vault token
without storing it in the job file. This overrides the token found in the
$VAULT_TOKEN environment variable and the vault_token field in the job file.
This token is cleared from the job after validating and cannot be used within
-the job executing environment. Use the vault stanza when templating in a job
+the job executing environment. Use the vault block when templating in a job
with a Vault token.

 -vault-namespace

@@ -121,13 +121,13 @@ Run Options:
 -vault-token
Used to validate if the user submitting the job has permission to run the job
according to its Vault policies. A Vault token must be supplied if the vault
-stanza allow_unauthenticated is disabled in the Nomad server configuration.
+block allow_unauthenticated is disabled in the Nomad server configuration.
If the -vault-token flag is set, the passed Vault token is added to the jobspec
before sending to the Nomad servers. This allows passing the Vault token
without storing it in the job file. This overrides the token found in the
$VAULT_TOKEN environment variable and the vault_token field in the job file.
This token is cleared from the job after validating and cannot be used within
-the job executing environment. Use the vault stanza when templating in a job
+the job executing environment. Use the vault block when templating in a job
with a Vault token.

 -vault-namespace

@@ -58,13 +58,13 @@ Validate Options:
 -vault-token
Used to validate if the user submitting the job has permission to run the job
according to its Vault policies. A Vault token must be supplied if the vault
-stanza allow_unauthenticated is disabled in the Nomad server configuration.
+block allow_unauthenticated is disabled in the Nomad server configuration.
If the -vault-token flag is set, the passed Vault token is added to the jobspec
before sending to the Nomad servers. This allows passing the Vault token
without storing it in the job file. This overrides the token found in the
$VAULT_TOKEN environment variable and the vault_token field in the job file.
This token is cleared from the job after validating and cannot be used within
-the job executing environment. Use the vault stanza when templating in a job
+the job executing environment. Use the vault block when templating in a job
with a Vault token.

 -vault-namespace

@@ -31,7 +31,7 @@ Usage: nomad var <subcommand> [options] [args]

This command groups subcommands for interacting with variables. Variables
allow operators to provide credentials and otherwise sensitive material to
-Nomad jobs at runtime via the template stanza or directly through
+Nomad jobs at runtime via the template block or directly through
the Nomad API and CLI.

Users can create new variables; list, inspect, and delete existing

@@ -21,7 +21,7 @@ Refer to the official plugin

* `--endpoint=${CSI_ENDPOINT}`: if you don't use the `CSI_ENDPOINT`
environment variable, this option must match the `mount_dir`
-specified in the `csi_plugin` stanza for the task.
+specified in the `csi_plugin` block for the task.

* `--nodeid=${node.unique.id}`: a unique ID for the node the task is running
on.

|
@ -4,7 +4,7 @@
|
|||
|
||||
The containers that run the Node/Controller applications require a cloud-config file be mounted in the containers and the path specified in the containers `args`.
|
||||
|
||||
The example plugin job creates a file at `local/cloud.conf` using a [`template`](https://www.nomadproject.io/docs/job-specification/template) stanza which pulls the necessary credentials from a [Vault kv-v2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) secrets store. However, other methods, such as using the [`artifact`](https://www.nomadproject.io/docs/job-specification/artifact) stanza, will work as well for delivering the `cloud.conf` file to the CSI drivers.
|
||||
The example plugin job creates a file at `local/cloud.conf` using a [`template`](https://www.nomadproject.io/docs/job-specification/template) block which pulls the necessary credentials from a [Vault kv-v2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) secrets store. However, other methods, such as using the [`artifact`](https://www.nomadproject.io/docs/job-specification/artifact) block, will work as well for delivering the `cloud.conf` file to the CSI drivers.
|
||||
|
||||
### Example cloud.conf
|
||||
|
||||
|
@@ -26,7 +26,7 @@ The Cinder CSI Node task requires that [`privileged = true`](https://www.nomadpr

 * `--endpoint=${CSI_ENDPOINT}`: If you don't use the `CSI_ENDPOINT`
   environment variable, this option must match the `mount_dir`
-  specified in the `csi_plugin` stanza for the task.
+  specified in the `csi_plugin` block for the task.

 * `--cloud-config=/etc/config/cloud.conf`: The location that the
   cloud.conf file was mounted inside the container
@@ -134,7 +134,7 @@ job "kadalu-csi-controller" {
 }

 mount {
-  # If you are not using gluster native quota comment out this stanza
+  # If you are not using gluster native quota comment out this block
   type   = "bind"
   source = "./${NOMAD_SECRETS_DIR}/ssh-privatekey"
   target = "/etc/secret-volume/ssh-privatekey"
@@ -1128,7 +1128,7 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 if mapping, ok := task.Resources.Ports.Get(port); ok {
 ports.add(mapping.Label, mapping.HostIP, mapping.Value, mapping.To)
 } else {
-return c, fmt.Errorf("Port %q not found, check network stanza", port)
+return c, fmt.Errorf("Port %q not found, check network block", port)
 }
 }
 case len(task.Resources.NomadResources.Networks) > 0:
@@ -69,7 +69,7 @@ func (tc *OnUpdateChecksTest) TestOnUpdateCheck_IgnoreWarning_IgnoreErrors(f *fr
 }

 // TestOnUpdate_CheckRestart ensures that a service check set to ignore
-// warnings still follows the check_restart stanza if the task becomes
+// warnings still follows the check_restart block if the task becomes
 // unhealthy after a deployment is successful. on_update_check_restart has a
 // script check that should report as a warning status for the deployment to
 // become healthy. The script check then reports unhealthy and the
@@ -107,7 +107,7 @@ job: {{ env "NOMAD_JOB_NAME" }}
 _, err := tc.Consul().KV().Delete(key, nil)
 f.NoError(err)

-// Parse job so we can replace the template stanza with isolated keys
+// Parse job so we can replace the template block with isolated keys
 job, err := jobspec.ParseFile("consultemplate/input/templating.nomad")
 f.NoError(err)
 job.ID = &jobID
@@ -56,7 +56,7 @@ func Parse(r io.Reader) (*api.Job, error) {
 // Parse the job out
 matches := list.Filter("job")
 if len(matches.Items) == 0 {
-return nil, fmt.Errorf("'job' stanza not found")
+return nil, fmt.Errorf("'job' block not found")
 }
 if err := parseJob(&job, matches); err != nil {
 return nil, fmt.Errorf("error parsing 'job': %s", err)
@@ -128,7 +128,7 @@ func parseJob(result *api.Job, list *ast.ObjectList) error {
 }
 }

-// If we have a reschedule stanza, then parse that
+// If we have a reschedule block, then parse that
 if o := listVal.Filter("reschedule"); len(o.Items) > 0 {
 if err := parseReschedulePolicy(&result.Reschedule, o); err != nil {
 return multierror.Prefix(err, "reschedule ->")
@@ -53,7 +53,7 @@ func ParseNetwork(o *ast.ObjectList) (*api.NetworkResource, error) {
 // Filter dns
 if dns := networkObj.Filter("dns"); len(dns.Items) > 0 {
 if len(dns.Items) > 1 {
-return nil, multierror.Prefix(fmt.Errorf("cannot have more than 1 dns stanza"), "network ->")
+return nil, multierror.Prefix(fmt.Errorf("cannot have more than 1 dns block"), "network ->")
 }

 d, err := parseDNS(dns.Items[0])
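For reference, the single `dns` block this parser enforces sits inside a group `network` block; the server address is illustrative:

```hcl
network {
  mode = "bridge"

  dns {
    servers = ["10.0.0.2"] # at most one dns block per network
  }
}
```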
@@ -105,7 +105,7 @@ func parseService(o *ast.ObjectItem) (*api.Service, error) {
 // Filter connect
 if co := listVal.Filter("connect"); len(co.Items) > 0 {
 if len(co.Items) > 1 {
-return nil, fmt.Errorf("connect '%s': cannot have more than 1 connect stanza", service.Name)
+return nil, fmt.Errorf("connect '%s': cannot have more than 1 connect block", service.Name)
 }
 c, err := parseConnect(co.Items[0])
 if err != nil {
@@ -290,7 +290,7 @@ func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) {
 // extract and parse the ingress block
 if io := listVal.Filter("ingress"); len(io.Items) > 0 {
 if len(io.Items) > 1 {
-return nil, fmt.Errorf("ingress, %s", "multiple ingress stanzas not allowed")
+return nil, fmt.Errorf("ingress, %s", "multiple ingress blocks not allowed")
 }

 ingress, err := parseIngressConfigEntry(io.Items[0])

@@ -302,7 +302,7 @@ func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) {

 if to := listVal.Filter("terminating"); len(to.Items) > 0 {
 if len(to.Items) > 1 {
-return nil, fmt.Errorf("terminating, %s", "multiple terminating stanzas not allowed")
+return nil, fmt.Errorf("terminating, %s", "multiple terminating blocks not allowed")
 }

 terminating, err := parseTerminatingConfigEntry(to.Items[0])

@@ -314,7 +314,7 @@ func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) {

 if mo := listVal.Filter("mesh"); len(mo.Items) > 0 {
 if len(mo.Items) > 1 {
-return nil, fmt.Errorf("mesh, %s", "multiple mesh stanzas not allowed")
+return nil, fmt.Errorf("mesh, %s", "multiple mesh blocks not allowed")
 }

 // mesh should have no keys
@@ -1033,7 +1033,7 @@ func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error {
 return err
 }

-// HCL allows repeating stanzas so merge 'header' into a single
+// HCL allows repeating blocks so merge 'header' into a single
 // map[string][]string.
 if headerI, ok := cm["header"]; ok {
 headerRaw, ok := headerI.([]map[string]interface{})
@@ -154,7 +154,7 @@ func parseTask(item *ast.ObjectItem, keys []string) (*api.Task, error) {

 if o := listVal.Filter("csi_plugin"); len(o.Items) > 0 {
 if len(o.Items) != 1 {
-return nil, fmt.Errorf("csi_plugin -> Expected single stanza, got %d", len(o.Items))
+return nil, fmt.Errorf("csi_plugin -> Expected single block, got %d", len(o.Items))
 }
 i := o.Elem().Items[0]
@@ -497,7 +497,7 @@ func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error {
 if o := listVal.Filter("change_script"); len(o.Items) > 0 {
 if len(o.Items) != 1 {
 return fmt.Errorf(
-"change_script -> expected single stanza, got %d", len(o.Items),
+"change_script -> expected single block, got %d", len(o.Items),
 )
 }
 var m map[string]interface{}
@@ -612,7 +612,7 @@ func (w *deploymentWatcher) handleAllocUpdate(allocs []*structs.AllocListStub) (
 continue
 }

-// Determine if the update stanza for this group is progress based
+// Determine if the update block for this group is progress based
 progressBased := dstate.ProgressDeadline != 0

 // Check if the allocation has failed and we need to mark it for allow
@@ -540,7 +540,7 @@ func (n *nomadFSM) applyUpsertJob(msgType structs.MessageType, buf []byte, index
 * un-intended destructive updates in scheduler since we use
 * reflect.DeepEqual. Starting Nomad 0.4.1, job submission sanitizes
 * the incoming job.
-* - Migrate from old style upgrade stanza that used only a stagger.
+* - Migrate from old style upgrade block that used only a stagger.
 */
 req.Job.Canonicalize()

@@ -1523,7 +1523,7 @@ func (n *nomadFSM) restoreImpl(old io.ReadCloser, filter *FSMFilter) error {
 * - Empty maps and slices should be treated as nil to avoid
 * un-intended destructive updates in scheduler since we use
 * reflect.DeepEqual. Job submission sanitizes the incoming job.
-* - Migrate from old style upgrade stanza that used only a stagger.
+* - Migrate from old style upgrade block that used only a stagger.
 */
 job.Canonicalize()
 if err := restore.JobRestore(job); err != nil {
@@ -332,7 +332,7 @@ func groupConnectHook(job *structs.Job, g *structs.TaskGroup) error {
 task := newConnectGatewayTask(prefix, service.Name, netHost, customizedTLS)
 g.Tasks = append(g.Tasks, task)

-// the connect.sidecar_task stanza can also be used to configure
+// the connect.sidecar_task block can also be used to configure
 // a custom task to use as a gateway proxy
 if service.Connect.SidecarTask != nil {
 service.Connect.SidecarTask.MergeIntoTask(task)
@@ -138,7 +138,7 @@ func (jobCanonicalizer) Mutate(job *structs.Job) (*structs.Job, []error, error)
 }

 // jobImpliedConstraints adds constraints to a job implied by other job fields
-// and stanzas.
+// and blocks.
 type jobImpliedConstraints struct{}

 func (jobImpliedConstraints) Name() string {
@@ -829,7 +829,7 @@ func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) {
 require.Equal("connect-proxy:backend", string(sidecarTask.Kind))
 require.Equal("connect-proxy-backend", out.TaskGroups[0].Networks[0].DynamicPorts[0].Label)

-// Check that the correct fields were overridden from the sidecar_task stanza
+// Check that the correct fields were overridden from the sidecar_task block
 require.Equal("test", sidecarTask.Meta["source"])
 require.Equal(500, sidecarTask.Resources.CPU)
 require.Equal(connectSidecarResources().MemoryMB, sidecarTask.Resources.MemoryMB)

@@ -6316,7 +6316,7 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
 }

 // TestJobEndpoint_Plan_Scaling asserts that the plan endpoint handles
-// jobs with scaling stanza
+// jobs with scaling block
 func TestJobEndpoint_Plan_Scaling(t *testing.T) {
 ci.Parallel(t)
@@ -9,7 +9,7 @@ import (
 "github.com/hashicorp/nomad/helper/pointer"
 )

-// ArtifactConfig is the configuration specific to the Artifact stanza
+// ArtifactConfig is the configuration specific to the Artifact block
 type ArtifactConfig struct {
 // HTTPReadTimeout is the duration in which a download must complete or
 // it will be canceled. Defaults to 30m.
@@ -20,11 +20,11 @@ const CSISocketName = "csi.sock"
 // where Nomad will expect plugins to create intermediary mounts for volumes.
 const CSIIntermediaryDirname = "volumes"

-// VolumeTypeCSI is the type in the volume stanza of a TaskGroup
+// VolumeTypeCSI is the type in the volume block of a TaskGroup
 const VolumeTypeCSI = "csi"

 // CSIPluginType is an enum string that encapsulates the valid options for a
-// CSIPlugin stanza's Type. These modes will allow the plugin to be used in
+// CSIPlugin block's Type. These modes will allow the plugin to be used in
 // different ways by the client.
 type CSIPluginType string
@@ -497,9 +497,9 @@ func incIP(ip net.IP) {
 }

 // AssignPorts based on an ask from the scheduler processing a group.network
-// stanza. Supports multi-interfaces through node configured host_networks.
+// block. Supports multi-interfaces through node configured host_networks.
 //
-// AssignTaskNetwork supports the deprecated task.resources.network stanza.
+// AssignTaskNetwork supports the deprecated task.resources.network block.
 func (idx *NetworkIndex) AssignPorts(ask *NetworkResource) (AllocatedPorts, error) {
 var offer AllocatedPorts
@@ -135,7 +135,7 @@ func (a *AutopilotConfig) Copy() *AutopilotConfig {
 }

 // SchedulerAlgorithm is an enum string that encapsulates the valid options for a
-// SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the
+// SchedulerConfiguration block's SchedulerAlgorithm. These modes will allow the
 // scheduler to be user-selectable.
 type SchedulerAlgorithm string
@@ -84,7 +84,7 @@ func (sc *ServiceCheck) IsReadiness() bool {
 return sc != nil && sc.OnUpdate == OnUpdateIgnore
 }

-// Copy the stanza recursively. Returns nil if nil.
+// Copy the block recursively. Returns nil if nil.
 func (sc *ServiceCheck) Copy() *ServiceCheck {
 if sc == nil {
 return nil

@@ -595,7 +595,7 @@ type Service struct {
 Provider string
 }

-// Copy the stanza recursively. Returns nil if nil.
+// Copy the block recursively. Returns nil if nil.
 func (s *Service) Copy() *Service {
 if s == nil {
 return nil

@@ -953,7 +953,7 @@ func (s *Service) Equal(o *Service) bool {
 return true
 }

-// ConsulConnect represents a Consul Connect jobspec stanza.
+// ConsulConnect represents a Consul Connect jobspec block.
 type ConsulConnect struct {
 // Native indicates whether the service is Consul Connect Native enabled.
 Native bool

@@ -968,7 +968,7 @@ type ConsulConnect struct {
 Gateway *ConsulGateway
 }

-// Copy the stanza recursively. Returns nil if nil.
+// Copy the block recursively. Returns nil if nil.
 func (c *ConsulConnect) Copy() *ConsulConnect {
 if c == nil {
 return nil

@@ -1085,7 +1085,7 @@ func (c *ConsulConnect) Validate() error {
 }

 // ConsulSidecarService represents a Consul Connect SidecarService jobspec
-// stanza.
+// block.
 type ConsulSidecarService struct {
 // Tags are optional service tags that get registered with the sidecar service
 // in Consul. If unset, the sidecar service inherits the parent service tags.

@@ -1095,7 +1095,7 @@ type ConsulSidecarService struct {
 // a port label or a literal port number.
 Port string

-// Proxy stanza defining the sidecar proxy configuration.
+// Proxy block defining the sidecar proxy configuration.
 Proxy *ConsulProxy

 // DisableDefaultTCPCheck, if true, instructs Nomad to avoid setting a

@@ -1108,7 +1108,7 @@ func (s *ConsulSidecarService) HasUpstreams() bool {
 return s != nil && s.Proxy != nil && len(s.Proxy.Upstreams) > 0
 }

-// Copy the stanza recursively. Returns nil if nil.
+// Copy the block recursively. Returns nil if nil.
 func (s *ConsulSidecarService) Copy() *ConsulSidecarService {
 if s == nil {
 return nil
@@ -1143,7 +1143,7 @@ func (s *ConsulSidecarService) Equal(o *ConsulSidecarService) bool {
 }

 // SidecarTask represents a subset of Task fields that are able to be overridden
-// from the sidecar_task stanza
+// from the sidecar_task block
 type SidecarTask struct {
 // Name of the task
 Name string

@@ -1337,7 +1337,7 @@ func (t *SidecarTask) MergeIntoTask(task *Task) {
 }
 }

-// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza.
+// ConsulProxy represents a Consul Connect sidecar proxy jobspec block.
 type ConsulProxy struct {

 // LocalServiceAddress is the address the local service binds to.

@@ -1354,7 +1354,7 @@ type ConsulProxy struct {
 // connect to.
 Upstreams []ConsulUpstream

-// Expose configures the consul proxy.expose stanza to "open up" endpoints
+// Expose configures the consul proxy.expose block to "open up" endpoints
 // used by task-group level service checks using HTTP or gRPC protocols.
 //
 // Use json tag to match with field name in api/

@@ -1365,7 +1365,7 @@ type ConsulProxy struct {
 Config map[string]interface{}
 }

-// Copy the stanza recursively. Returns nil if nil.
+// Copy the block recursively. Returns nil if nil.
 func (p *ConsulProxy) Copy() *ConsulProxy {
 if p == nil {
 return nil

@@ -1460,7 +1460,7 @@ func (c *ConsulMeshGateway) Validate() error {
 }
 }

-// ConsulUpstream represents a Consul Connect upstream jobspec stanza.
+// ConsulUpstream represents a Consul Connect upstream jobspec block.
 type ConsulUpstream struct {
 // DestinationName is the name of the upstream service.
 DestinationName string

@@ -1524,7 +1524,7 @@ func upstreamsEquals(a, b []ConsulUpstream) bool {
 return setA.Equal(setB)
 }

-// ConsulExposeConfig represents a Consul Connect expose jobspec stanza.
+// ConsulExposeConfig represents a Consul Connect expose jobspec block.
 type ConsulExposeConfig struct {
 // Use json tag to match with field name in api/
 Paths []ConsulExposePath `json:"Path"`

@@ -1541,7 +1541,7 @@ func exposePathsEqual(a, b []ConsulExposePath) bool {
 return helper.SliceSetEq(a, b)
 }

-// Copy the stanza. Returns nil if e is nil.
+// Copy the block. Returns nil if e is nil.
 func (e *ConsulExposeConfig) Copy() *ConsulExposeConfig {
 if e == nil {
 return nil
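A jobspec sketch tying these types together: a service with a `connect` block, a `sidecar_service`, a `proxy`, and one `upstreams` entry (service names and ports illustrative):

```hcl
service {
  name = "count-dashboard"
  port = "9002"

  connect {
    sidecar_service {
      proxy {
        upstreams {
          destination_name = "count-api"
          local_bind_port  = 8080
        }
      }
    }
  }
}
```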
@@ -490,7 +490,7 @@ func TestConsulConnect_Validate(t *testing.T) {

 c := &ConsulConnect{}

-// An empty Connect stanza is invalid
+// An empty Connect block is invalid
 require.Error(t, c.Validate())

 c.Native = true

@@ -1004,7 +1004,7 @@ func TestConsulGateway_Equal_ingress(t *testing.T) {
 require.True(t, modifiable.Equal(modifiable))
 }

-// proxy stanza equality checks
+// proxy block equality checks

 t.Run("mod gateway timeout", func(t *testing.T) {
 try(t, func(g *cg) { g.Proxy.ConnectTimeout = pointer.Of(9 * time.Second) })

@@ -1090,7 +1090,7 @@ func TestConsulGateway_Equal_terminating(t *testing.T) {
 require.True(t, modifiable.Equal(modifiable))
 }

-// proxy stanza equality checks
+// proxy block equality checks

 t.Run("mod dns discovery type", func(t *testing.T) {
 try(t, func(g *cg) { g.Proxy.EnvoyDNSDiscoveryType = "LOGICAL_DNS" })
@@ -2690,7 +2690,7 @@ func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) {
 }

 type Port struct {
-// Label is the key for HCL port stanzas: port "foo" {}
+// Label is the key for HCL port blocks: port "foo" {}
 Label string

 // Value is the static or dynamic port value. For dynamic ports this

@@ -3002,7 +3002,7 @@ type NodeResources struct {

 // Networks is the node's bridge network and default interface. It is
 // only used when scheduling jobs with a deprecated
-// task.resources.network stanza.
+// task.resources.network block.
 Networks Networks

 // MinDynamicPort and MaxDynamicPort represent the inclusive port range

@@ -4235,7 +4235,7 @@ type Job struct {
 TaskGroups []*TaskGroup

 // See agent.ApiJobToStructJob
-// Update provides defaults for the TaskGroup Update stanzas
+// Update provides defaults for the TaskGroup Update blocks
 Update UpdateStrategy

 Multiregion *Multiregion

@@ -4294,7 +4294,7 @@ type Job struct {
 // of a deployment and can be manually set via APIs. This field is updated
 // when the status of a corresponding deployment transitions to Failed
 // or Successful. This field is not meaningful for jobs that don't have an
-// update stanza.
+// update block.
 Stable bool

 // Version is a monotonically increasing version number that is incremented

@@ -4460,7 +4460,7 @@ func (j *Job) Validate() error {
 }
 if j.Type == JobTypeSystem {
 if j.Affinities != nil {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block"))
 }
 } else {
 for idx, affinity := range j.Affinities {

@@ -4473,7 +4473,7 @@ func (j *Job) Validate() error {

 if j.Type == JobTypeSystem {
 if j.Spreads != nil {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block"))
 }
 } else {
 for idx, spread := range j.Spreads {
@@ -6492,7 +6492,7 @@ func (tg *TaskGroup) Validate(j *Job) error {
 }
 if j.Type == JobTypeSystem {
 if tg.Affinities != nil {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block"))
 }
 } else {
 for idx, affinity := range tg.Affinities {

@@ -6513,7 +6513,7 @@ func (tg *TaskGroup) Validate(j *Job) error {

 if j.Type == JobTypeSystem {
 if tg.Spreads != nil {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block"))
 }
 } else {
 for idx, spread := range tg.Spreads {

@@ -7389,7 +7389,7 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices

 if jobType == JobTypeSystem {
 if t.Affinities != nil {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block"))
 }
 } else {
 for idx, affinity := range t.Affinities {

@@ -7459,9 +7459,9 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices

 // Validation for TaskKind field which is used for Consul Connect integration
 if t.Kind.IsConnectProxy() {
-// This task is a Connect proxy so it should not have service stanzas
+// This task is a Connect proxy so it should not have service blocks
 if len(t.Services) > 0 {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza"))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service block"))
 }
 if t.Leader {
 mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set"))

@@ -7654,7 +7654,7 @@ func (t *Task) Warnings() error {

 // Validate the resources
 if t.Resources != nil && t.Resources.IOPS != 0 {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza."))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource block."))
 }

 if t.Resources != nil && len(t.Resources.Networks) != 0 {

@@ -7981,7 +7981,7 @@ func (t *Template) Warnings() error {

 // Deprecation notice for vault_grace
 if t.VaultGrace != 0 {
-mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."))
+mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template block."))
 }

 return mErr.ErrorOrNil()

@@ -9151,7 +9151,7 @@ func (s *Spread) Validate() error {
 mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute"))
 }
 if s.Weight <= 0 || s.Weight > 100 {
-mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 0 to 100"))
+mErr.Errors = append(mErr.Errors, errors.New("Spread block must have a positive weight from 0 to 100"))
 }
 seen := make(map[string]struct{})
 sumPercent := uint32(0)
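A `spread` block that passes the weight validation above might look like this (datacenter names illustrative):

```hcl
spread {
  attribute = "${node.datacenter}"
  weight    = 50 # must be greater than 0 and at most 100

  target "us-east-1" {
    percent = 60
  }

  target "us-west-1" {
    percent = 40
  }
}
```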
@@ -180,7 +180,7 @@ func TestJob_Warnings(t *testing.T) {
 Expected []string
 }{
 {
-Name: "Higher counts for update stanza",
+Name: "Higher counts for update block",
 Expected: []string{"max parallel count is greater"},
 Job: &Job{
 Type: JobTypeService,

@@ -237,7 +237,7 @@ func TestJob_Warnings(t *testing.T) {
 },
 {
 Name: "Template.VaultGrace Deprecated",
-Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."},
+Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template block."},
 Job: &Job{
 Type: JobTypeService,
 TaskGroups: []*TaskGroup{

@@ -577,7 +577,7 @@ func TestJob_SystemJob_Validate(t *testing.T) {
 }}
 err = j.Validate()
 require.NotNil(t, err)
-require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")
+require.Contains(t, err.Error(), "System jobs may not have an affinity block")

 // Add spread at job and task group level, that should fail validation
 j.Spreads = []*Spread{{

@@ -591,7 +591,7 @@ func TestJob_SystemJob_Validate(t *testing.T) {

 err = j.Validate()
 require.NotNil(t, err)
-require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
+require.Contains(t, err.Error(), "System jobs may not have a spread block")

 }
@@ -2400,7 +2400,7 @@ func TestTask_Validate_ConnectProxyKind(t *testing.T) {
 Service: &Service{
 Name: "redis",
 },
-ErrContains: "Connect proxy task must not have a service stanza",
+ErrContains: "Connect proxy task must not have a service block",
 },
 {
 Desc: "Leader should not be set",

@@ -2425,7 +2425,7 @@ func TestTask_Validate_ConnectProxyKind(t *testing.T) {
 ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
 },
 {
-Desc: "Connect stanza not configured in group",
+Desc: "Connect block not configured in group",
 Kind: "connect-proxy:redis",
 TgService: []*Service{{
 Name: "redis",

@@ -6536,7 +6536,7 @@ func TestSpread_Validate(t *testing.T) {
 Attribute: "${node.datacenter}",
 Weight: -1,
 },
-err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
+err: fmt.Errorf("Spread block must have a positive weight from 0 to 100"),
 name: "Invalid weight",
 },
 {

@@ -6544,7 +6544,7 @@ func TestSpread_Validate(t *testing.T) {
 Attribute: "${node.datacenter}",
 Weight: 110,
 },
-err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
+err: fmt.Errorf("Spread block must have a positive weight from 0 to 100"),
 name: "Invalid weight",
 },
 {
@@ -7,7 +7,7 @@ The example device plugin models files within a specified directory as devices.

 # Config

-The configuration should be passed via an HCL file that begins with a top level `config` stanza:
+The configuration should be passed via an HCL file that begins with a top level `config` block:

 ```
 config {
@@ -405,7 +405,7 @@ type LinuxResources struct {
 // and thus the calculation for CPUQuota cannot be done on the client.
 // This is a capatability and should only be used by docker until the docker
 // specific options are deprecated in favor of exposes CPUPeriod and
-// CPUQuota at the task resource stanza.
+// CPUQuota at the task resource block.
 PercentTicks float64
 }
@@ -308,7 +308,7 @@ func (m *TaskConfigSchemaRequest) XXX_DiscardUnknown() {
 var xxx_messageInfo_TaskConfigSchemaRequest proto.InternalMessageInfo

 type TaskConfigSchemaResponse struct {
-// Spec is the configuration schema for the job driver config stanza
+// Spec is the configuration schema for the job driver config block
 Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"`
 XXX_NoUnkeyedLiteral struct{} `json:"-"`
 XXX_unrecognized []byte `json:"-"`

@@ -91,7 +91,7 @@ message TaskConfigSchemaRequest {}

 message TaskConfigSchemaResponse {

-// Spec is the configuration schema for the job driver config stanza
+// Spec is the configuration schema for the job driver config block
 hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1;
 }
@@ -492,7 +492,7 @@ func (s *GenericScheduler) downgradedJobForPlacement(p placementResult) (string,
 }
 }

-// check if the non-promoted version is a job without update stanza. This version should be the latest "stable" version,
+// check if the non-promoted version is a job without update block. This version should be the latest "stable" version,
 // as all subsequent versions must be canaried deployments. Otherwise, we would have found a deployment above,
 // or the alloc would have been replaced already by a newer non-deployment job.
 if job, err := s.state.JobByIDAndVersion(nil, ns, jobID, p.MinJobVersion()); err == nil && job != nil && job.Update.IsEmpty() {
@@ -9,7 +9,7 @@ import (

 // maxParallelPenalty is a score penalty applied to allocations to mitigate against
 // too many allocations of the same job being preempted. This penalty is applied after the
-// number of allocations being preempted exceeds max_parallel value in the job's migrate stanza
+// number of allocations being preempted exceeds max_parallel value in the job's migrate block
 const maxParallelPenalty = 50.0

 type groupedAllocs struct {
@@ -98,13 +98,13 @@ func (p *propertySet) setConstraint(constraint *structs.Constraint, taskGroup st
 }

 // SetTargetAttribute is used to populate this property set without also storing allowed count
-// This is used when evaluating spread stanzas
+// This is used when evaluating spread blocks
 func (p *propertySet) SetTargetAttribute(targetAttribute string, taskGroup string) {
 p.setTargetAttributeWithCount(targetAttribute, 0, taskGroup)
 }

 // setTargetAttributeWithCount is a shared helper for setting a job or task group attribute and allowedCount
-// allowedCount can be zero when this is used in evaluating spread stanzas
+// allowedCount can be zero when this is used in evaluating spread blocks
 func (p *propertySet) setTargetAttributeWithCount(targetAttribute string, allowedCount uint64, taskGroup string) {
 // Store that this is for a task group
 if taskGroup != "" {
@@ -1421,8 +1421,8 @@ func TestReconciler_MultiTG(t *testing.T) {
 }

 // Tests the reconciler properly handles jobs with multiple task groups with
-// only one having an update stanza and a deployment already being created
-func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) {
+// only one having an update block and a deployment already being created
+func TestReconciler_MultiTG_SingleUpdateBlock(t *testing.T) {
 ci.Parallel(t)

 job := mock.Job()

@@ -1957,7 +1957,7 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 1,
 Interval: 24 * time.Hour,

@@ -2040,7 +2040,7 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 1,
 Interval: 24 * time.Hour,
@@ -2122,7 +2122,7 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 1,
 Interval: 24 * time.Hour,

@@ -2206,7 +2206,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 1,
 Interval: 24 * time.Hour,

@@ -2317,7 +2317,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Delay: 5 * time.Second,
 DelayFunction: "constant",
@@ -2445,7 +2445,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 1,
 Interval: 24 * time.Hour,

@@ -4989,7 +4989,7 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) {
 job.TaskGroups[0].Count = 5
 tgName := job.TaskGroups[0].Name

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 1,
 Interval: 24 * time.Hour,

@@ -5068,7 +5068,7 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) {
 tgName := job.TaskGroups[0].Name
 now := time.Now()

-// Set up reschedule policy and update stanza
+// Set up reschedule policy and update block
 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
 Attempts: 0,
 Interval: 24 * time.Hour,
@@ -27,7 +27,7 @@ type SpreadIterator struct {
 tgSpreadInfo map[string]spreadAttributeMap

 // sumSpreadWeights tracks the total weight across all spread
-// stanzas
+// blocks
 sumSpreadWeights int32

 // hasSpread is used to early return when the job/task group

@@ -248,7 +248,7 @@ func (iter *SpreadIterator) computeSpreadInfo(tg *structs.TaskGroup) {
 spreadInfos := make(spreadAttributeMap, len(tg.Spreads))
 totalCount := tg.Count

-// Always combine any spread stanzas defined at the job level here
+// Always combine any spread blocks defined at the job level here
 combinedSpreads := make([]*structs.Spread, 0, len(tg.Spreads)+len(iter.jobSpreads))
 combinedSpreads = append(combinedSpreads, tg.Spreads...)
 combinedSpreads = append(combinedSpreads, iter.jobSpreads...)
@@ -420,10 +420,10 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack {
 // node where the allocation failed previously
 s.nodeReschedulingPenalty = NewNodeReschedulingPenaltyIterator(ctx, s.jobAntiAff)

-// Apply scores based on affinity stanza
+// Apply scores based on affinity block
 s.nodeAffinity = NewNodeAffinityIterator(ctx, s.nodeReschedulingPenalty)

-// Apply scores based on spread stanza
+// Apply scores based on spread block
 s.spread = NewSpreadIterator(ctx, s.nodeAffinity)

 // Add the preemption options scoring iterator
@@ -615,7 +615,7 @@ func consulNamespaceUpdated(tgA, tgB *structs.TaskGroup) bool {
 return tgA.Consul.GetNamespace() != tgB.Consul.GetNamespace()
 }

-// connectServiceUpdated returns true if any services with a connect stanza have
+// connectServiceUpdated returns true if any services with a connect block have
 // been changed in such a way that requires a destructive update.
 //
 // Ordinary services can be updated in-place by updating the service definition
@@ -261,7 +261,7 @@ The `Job` object supports the following keys:
   token and is not stored after job submission.

 - `VaultToken` - Specifies the Vault token that proves the submitter of the job
-  has access to the specified policies in the `vault` stanza. This field is
+  has access to the specified policies in the `vault` block. This field is
   only used to transfer the token and is not stored after job submission.

 - `Namespace` - The namespace to execute the job in, defaults to "default".

@@ -303,7 +303,7 @@ The `Job` object supports the following keys:
 - `Update` - Specifies an update strategy to be applied to all task groups
   within the job. When specified both at the job level and the task group level,
   the update blocks are merged with the task group's taking precedence. For more
-  details on the update stanza, please see below.
+  details on the update block, please see below.

 - `Periodic` - `Periodic` allows the job to be scheduled at fixed times, dates
   or intervals. The periodic expression is always evaluated in the UTC

@@ -403,7 +403,7 @@ attributes:
 - `Update` - Specifies an update strategy to be applied to all task groups
   within the job. When specified both at the job level and the task group level,
   the update blocks are merged with the task group's taking precedence. For more
-  details on the update stanza, please see below.
+  details on the update block, please see below.

 - `Tasks` - A list of `Task` object that are part of the task group.

@@ -569,7 +569,7 @@ The `Task` object supports the following keys:
 - `PortLabel`: Specifies the label of the port on which the check will
   be performed. Note this is the _label_ of the port and not the port
   number unless `AddressMode: "driver"`. The port label must match one
-  defined in the Network stanza. If a port value was declared on the
+  defined in the Network block. If a port value was declared on the
   `Service`, this will inherit from that value if not supplied. If
   supplied, this value takes precedence over the `Service.PortLabel`
   value. This is useful for services which operate on multiple ports.

@@ -759,9 +759,9 @@ The `RestartPolicy` object supports the following keys:
 ### Update

 Specifies the task group update strategy. When omitted, rolling updates are
-disabled. The update stanza can be specified at the job or task group level.
-When specified at the job, the update stanza is inherited by all task groups.
-When specified in both the job and in a task group, the stanzas are merged with
+disabled. The update block can be specified at the job or task group level.
+When specified at the job, the update block is inherited by all task groups.
+When specified in both the job and in a task group, the blocks are merged with
 the task group's taking precedence. The `Update` object supports the following
 attributes:
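In HCL jobspec form, the update strategy described above is expressed as an `update` block; the values here are illustrative, not prescriptions:

```hcl
update {
  max_parallel      = 2
  min_healthy_time  = "10s"
  healthy_deadline  = "5m"
  progress_deadline = "10m"
  auto_revert       = true
}
```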
@@ -119,7 +119,7 @@ The table below shows this endpoint's support for

 ### Bootstrap Configuration Element

-The [`default_scheduler_config`][] attribute of the server stanza will provide a
+The [`default_scheduler_config`][] attribute of the server block will provide a
 starting value for this configuration. Once bootstrapped, the value in the
 server state is authoritative.
@@ -127,7 +127,7 @@ The `/search/fuzzy` endpoint returns partial substring matches for a given searc
 term and context, where a context can be jobs, allocations, nodes, plugins, or namespaces.
 Additionally, fuzzy searching can be done across all contexts. For better control
 over the performance implications of fuzzy searching on Nomad servers, aspects of
-fuzzy searching can be tuned through the <code>[search]</code> stanza in Nomad agent config.
+fuzzy searching can be tuned through the <code>[search]</code> block in Nomad agent config.

 Fuzzy search results are ordered starting with closest matching terms. Items of
 a name that exactly matches the search term are listed first.
@@ -80,14 +80,14 @@ capability for the job's namespace.

 - `-vault-token`: Used to validate if the user submitting the job has
   permission to run the job according to its Vault policies. A Vault token must
-  be supplied if the [`vault` stanza `allow_unauthenticated`] is disabled in
+  be supplied if the [`vault` block `allow_unauthenticated`] is disabled in
   the Nomad server configuration. If the `-vault-token` flag is set, the passed
   Vault token is added to the jobspec before sending to the Nomad servers. This
   allows passing the Vault token without storing it in the job file. This
   overrides the token found in the `$VAULT_TOKEN` environment variable and the
   [`vault_token`] field in the job file. This token is cleared from the job
   after planning and cannot be used within the job executing environment. Use
-  the `vault` stanza when templating in a job with a Vault token.
+  the `vault` block when templating in a job with a Vault token.

 - `-vault-namespace`: If set, the passed Vault namespace is stored in the job
   before sending to the Nomad servers.

@@ -260,5 +260,5 @@ if a change is detected.
 [`go-getter`]: https://github.com/hashicorp/go-getter
 [`nomad job run -check-index`]: /nomad/docs/commands/job/run#check-index
 [`tee`]: https://man7.org/linux/man-pages/man1/tee.1.html
-[`vault` stanza `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated
+[`vault` block `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated
 [`vault_token`]: /nomad/docs/job-specification/job#vault_token
@@ -100,22 +100,22 @@ that volume.
   `$CONSUL_HTTP_TOKEN` environment variable and that found in the job.

 - `-consul-namespace`: <EnterpriseAlert inline/> If set, any services in the job will be registered into the
-  specified Consul namespace. Any `template` stanza reading from Consul KV will
+  specified Consul namespace. Any `template` block reading from Consul KV will
   scoped to the specified Consul namespace. If Consul ACLs are enabled and the
-  [`consul` stanza `allow_unauthenticated`] is disabled in the Nomad server configuration, then
+  [`consul` block `allow_unauthenticated`] is disabled in the Nomad server configuration, then
   a Consul token must be supplied with appropriate service and kv Consul ACL policy
   permissions.

 - `-vault-token`: Used to validate if the user submitting the job has
   permission to run the job according to its Vault policies. A Vault token must
-  be supplied if the [`vault` stanza `allow_unauthenticated`] is disabled in
+  be supplied if the [`vault` block `allow_unauthenticated`] is disabled in
   the Nomad server configuration. If the `-vault-token` flag is set, the passed
   Vault token is added to the jobspec before sending to the Nomad servers. This
   allows passing the Vault token without storing it in the job file. This
   overrides the token found in the `$VAULT_TOKEN` environment variable and the
   [`vault_token`] field in the job file. This token is cleared from the job
   after validating and cannot be used within the job executing environment. Use
-  the `vault` stanza when templating in a job with a Vault token.
+  the `vault` block when templating in a job with a Vault token.

 - `-vault-namespace`: If set, the passed Vault namespace is stored in the job
   before sending to the Nomad servers.

@@ -237,7 +237,7 @@ $ nomad job run example.nomad
 ```

 [`batch`]: /nomad/docs/schedulers#batch
-[`consul` stanza `allow_unauthenticated`]: /nomad/docs/configuration/consul#allow_unauthenticated
+[`consul` block `allow_unauthenticated`]: /nomad/docs/configuration/consul#allow_unauthenticated
 [deployment status]: /nomad/docs/commands/deployment#status
 [eval status]: /nomad/docs/commands/eval/status
 [`go-getter`]: https://github.com/hashicorp/go-getter

@@ -245,5 +245,5 @@ $ nomad job run example.nomad
 [job specification]: /nomad/docs/job-specification
 [JSON jobs]: /nomad/api-docs/json-jobs
 [`system`]: /nomad/docs/schedulers#system
-[`vault` stanza `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated
+[`vault` block `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated
 [`vault_token`]: /nomad/docs/job-specification/job#vault_token
@@ -55,14 +55,14 @@ capability for the job's namespace.

 - `-vault-token`: Used to validate if the user submitting the job has
   permission to run the job according to its Vault policies. A Vault token must
-  be supplied if the [`vault` stanza `allow_unauthenticated`] is disabled in
+  be supplied if the [`vault` block `allow_unauthenticated`] is disabled in
   the Nomad server configuration. If the `-vault-token` flag is set, the passed
   Vault token is added to the jobspec before sending to the Nomad servers. This
   allows passing the Vault token without storing it in the job file. This
   overrides the token found in the `$VAULT_TOKEN` environment variable and the
   [`vault_token`] field in the job file. This token is cleared from the job
   after validating and cannot be used within the job executing environment. Use
-  the `vault` stanza when templating in a job with a Vault token.
+  the `vault` block when templating in a job with a Vault token.

 - `-vault-namespace`: If set, the passed Vault namespace is stored in the job
   before sending to the Nomad servers.

@@ -98,5 +98,5 @@ Job validation successful

 [`go-getter`]: https://github.com/hashicorp/go-getter
 [job specification]: /nomad/docs/job-specification
-[`vault` stanza `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated
+[`vault` block `allow_unauthenticated`]: /nomad/docs/configuration/vault#allow_unauthenticated
 [`vault_token`]: /nomad/docs/job-specification/job#vault_token
@@ -10,7 +10,7 @@ description: |
 The `node drain` command is used to toggle drain mode on a given node. Drain
 mode prevents any new tasks from being allocated to the node, and begins
 migrating all existing allocations away. Allocations will be migrated according
-to their [`migrate`][migrate] stanza until the drain's deadline is reached.
+to their [`migrate`][migrate] block until the drain's deadline is reached.

 By default the `node drain` command blocks until a node is done draining and
 all allocations have terminated. Canceling the `node drain` command _will not_
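A `migrate` block of the kind the drain honors, with illustrative values:

```hcl
migrate {
  max_parallel     = 1
  health_check     = "checks"
  min_healthy_time = "10s"
  healthy_deadline = "5m"
}
```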
@@ -50,7 +50,7 @@ allocation directory like the one below.
 modes. Within the `alloc/` directory are three standard directories:

 - **alloc/data/**: This directory is the location used by the
-  [`ephemeral_disk`] stanza for shared data.
+  [`ephemeral_disk`] block for shared data.

 - **alloc/logs/**: This directory is the location of the log files for every
   task within an allocation. The `nomad alloc logs` command streams these
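A task group opts into that shared space with an `ephemeral_disk` block; the size is illustrative:

```hcl
ephemeral_disk {
  migrate = true
  size    = 300 # MB
  sticky  = true
}
```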
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: acl Stanza - Agent Configuration
+page_title: acl Block - Agent Configuration
 description: >-
-  The "acl" stanza configures the Nomad agent to enable ACLs and tune various
+  The "acl" block configures the Nomad agent to enable ACLs and tune various
   parameters.
 ---

-# `acl` Stanza
+# `acl` Block

 <Placement groups={['acl']} />

-The `acl` stanza configures the Nomad agent to enable ACLs and tunes various
+The `acl` block configures the Nomad agent to enable ACLs and tunes various
 ACL parameters. Learn more about configuring Nomad's ACL system in the [Secure
 Nomad with Access Control guide][secure-guide].
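A minimal `acl` block as described, with illustrative TTLs:

```hcl
acl {
  enabled    = true
  token_ttl  = "30s"
  policy_ttl = "60s"
}
```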
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: audit Stanza - Agent Configuration
+page_title: audit Block - Agent Configuration
 description: >-
-  The "audit" stanza configures the Nomad agent to configure Audit Logging
+  The "audit" block configures the Nomad agent to configure Audit Logging
   behavior. This is an Enterprise-only feature.
 ---

-# `audit` Stanza
+# `audit` Block

 <Placement groups={['audit']} />

-The `audit` stanza configures the Nomad agent to configure Audit logging behavior.
+The `audit` block configures the Nomad agent to configure Audit logging behavior.
 Audit logging is an Enterprise-only feature.

 ```hcl
@@ -26,7 +26,7 @@ generate two audit log entries. These two entries correspond to a stage,
 event will be sent after the request has been processed, but before the response
 body is returned to the end user.

-By default, with a minimally configured audit stanza (`audit { enabled = true }`)
+By default, with a minimally configured audit block (`audit { enabled = true }`)
 The following default sink will be added with no filters.

 ```hcl
@@ -52,18 +52,18 @@ in order for HTTP requests to successfully complete.
 When enabled, audit logging will occur for every request, unless it is
 filtered by a `filter`.

-- `sink` <code>([sink](#sink-stanza): default)</code> - Configures a sink
+- `sink` <code>([sink](#sink-block): default)</code> - Configures a sink
   for audit logs to be sent to.

-- `filter` <code>(array<[filter](#filter-stanza)>: [])</code> - Configures a filter
+- `filter` <code>(array<[filter](#filter-block)>: [])</code> - Configures a filter
   to exclude matching events from being sent to audit logging sinks.

-### `sink` Stanza
+### `sink` Block

-The `sink` stanza is used to make audit logging sinks for events to be
+The `sink` block is used to make audit logging sinks for events to be
 sent to. Currently only a single sink is supported.

-The key of the stanza corresponds to the name of the sink which is used
+The key of the block corresponds to the name of the sink which is used
 for logging purposes

 ```hcl
@@ -119,9 +119,9 @@ audit {
 - `rotate_max_files` `(int: 0)` - Specifies the maximum number of older audit
   log file archives to keep. If 0, no files are ever deleted.

-### `filter` Stanza
+### `filter` Block

-The `filter` stanza is used to create filters to filter **out** matching events
+The `filter` block is used to create filters to filter **out** matching events
 from being written to the audit log. By default, all events will be sent to an
 audit log for all stages (OperationReceived and OperationComplete). Filters
 are useful for operators who want to limit the performance impact of audit
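A sketch of a `filter` alongside an enabled audit configuration, dropping DELETE events at the receive stage (the filter name and selections are illustrative):

```hcl
audit {
  enabled = true

  filter "remove deletes" {
    type       = "HTTPEvent"
    endpoints  = ["*"]
    stages     = ["OperationReceived"]
    operations = ["DELETE"]
  }
}
```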
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: autopilot Stanza - Agent Configuration
+page_title: autopilot Block - Agent Configuration
 description: >-
-  The "autopilot" stanza configures the Nomad agent to configure Autopilot
+  The "autopilot" block configures the Nomad agent to configure Autopilot
   behavior.
 ---

-# `autopilot` Stanza
+# `autopilot` Block

 <Placement groups={['autopilot']} />

-The `autopilot` stanza configures the Nomad agent to configure Autopilot behavior.
+The `autopilot` block configures the Nomad agent to configure Autopilot behavior.
 For more information about Autopilot, see the [Autopilot Guide](/nomad/tutorials/manage-clusters/autopilot).

 ```hcl
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: client Stanza - Agent Configuration
+page_title: client Block - Agent Configuration
 description: |-
-  The "client" stanza configures the Nomad agent to accept jobs as assigned by
+  The "client" block configures the Nomad agent to accept jobs as assigned by
   the Nomad server, join the cluster, and specify driver-specific configuration.
 ---

-# `client` Stanza
+# `client` Block

 <Placement groups={['client']} />

-The `client` stanza configures the Nomad agent to accept jobs as assigned by
+The `client` block configures the Nomad agent to accept jobs as assigned by
 the Nomad server, join the cluster, and specify driver-specific configuration.

 ```hcl
@@ -155,16 +155,16 @@ client {

 - `artifact` <code>([Artifact](#artifact-parameters): varied)</code> -
   Specifies controls on the behavior of task
-  [`artifact`](/nomad/docs/job-specification/artifact) stanzas.
+  [`artifact`](/nomad/docs/job-specification/artifact) blocks.

 - `template` <code>([Template](#template-parameters): nil)</code> - Specifies
   controls on the behavior of task
-  [`template`](/nomad/docs/job-specification/template) stanzas.
+  [`template`](/nomad/docs/job-specification/template) blocks.

-- `host_volume` <code>([host_volume](#host_volume-stanza): nil)</code> - Exposes
+- `host_volume` <code>([host_volume](#host_volume-block): nil)</code> - Exposes
   paths from the host as volumes that can be mounted into jobs.

-- `host_network` <code>([host_network](#host_network-stanza): nil)</code> - Registers
+- `host_network` <code>([host_network](#host_network-block): nil)</code> - Registers
   additional host networks with the node that can be selected when port mapping.

 - `cgroup_parent` `(string: "/nomad")` - Specifies the cgroup parent for which cgroup
@@ -210,7 +210,7 @@ chroot as doing so would cause infinite recursion.
 ### `options` Parameters

 ~> Note: In Nomad 0.9 client configuration options for drivers were deprecated.
-See the [plugin stanza][plugin-stanza] documentation for more information.
+See the [plugin block][plugin-block] documentation for more information.

 The following is not an exhaustive list of options for only the Nomad
 client. To find the options supported by each individual Nomad driver, please
@@ -360,7 +360,7 @@ see the [drivers documentation](/nomad/docs/drivers).
 - `reserved_ports` `(string: "")` - Specifies a comma-separated list of ports
   to reserve on all fingerprinted network devices. Ranges can be specified by
   using a hyphen separating the two inclusive ends. See also
-  [`host_network`](#host_network-stanza) for reserving ports on specific host
+  [`host_network`](#host_network-block) for reserving ports on specific host
   networks.
@@ -430,7 +430,7 @@ see the [drivers documentation](/nomad/docs/drivers).
   re-render the template with the data available at the time. This is useful to enable in
   systems where Consul is in a degraded state, or the referenced data values are changing
   rapidly, because it will reduce the number of times a template is rendered. This
-  configuration is also exposed in the _task template stanza_ to allow overrides per task.
+  configuration is also exposed in the _task template block_ to allow overrides per task.

 ```hcl
 wait {
@ -532,11 +532,11 @@ see the [drivers documentation](/nomad/docs/drivers).
|
|||
}
|
||||
```
|
||||
|
||||
### `host_volume` Stanza
|
||||
### `host_volume` Block
|
||||
|
||||
The `host_volume` stanza is used to make volumes available to jobs.
|
||||
The `host_volume` block is used to make volumes available to jobs.
|
||||
|
||||
The key of the stanza corresponds to the name of the volume for use in the
|
||||
The key of the block corresponds to the name of the volume for use in the
|
||||
`source` parameter of a `"host"` type [`volume`](/nomad/docs/job-specification/volume)
|
||||
and ACLs.
|
||||
|
||||
|
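
A minimal `host_volume` block inside a client configuration might look like this sketch (the volume name and path are illustrative):

```hcl
client {
  # Expose the host's CA certificates to jobs as a read-only volume.
  host_volume "ca-certificates" {
    path      = "/etc/ssl/certs"
    read_only = true
  }
}
```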
@@ -558,12 +558,12 @@ client {
 - `read_only` `(bool: false)` - Specifies whether the volume should only ever be
   allowed to be mounted `read_only`, or if it should be writeable.

-### `host_network` Stanza
+### `host_network` Block

-The `host_network` stanza is used to register additional host networks with
+The `host_network` block is used to register additional host networks with
 the node that can be used when port mapping.

-The key of the stanza corresponds to the name of the network used in the
+The key of the block corresponds to the name of the network used in the
 [`host_network`](/nomad/docs/job-specification/network#host-networks).

 ```hcl
@@ -647,7 +647,7 @@ client {
 ```

 [plugin-options]: #plugin-options
-[plugin-stanza]: /nomad/docs/configuration/plugin
+[plugin-block]: /nomad/docs/configuration/plugin
 [server-join]: /nomad/docs/configuration/server_join 'Server Join'
 [metadata_constraint]: /nomad/docs/job-specification/constraint#user-specified-metadata 'Nomad User-Specified Metadata Constraint Example'
 [task working directory]: /nomad/docs/runtime/environment#task-directories 'Task directories'

@@ -1,18 +1,18 @@
 ---
 layout: docs
-page_title: consul Stanza - Agent Configuration
+page_title: consul Block - Agent Configuration
 description: |-
-  The "consul" stanza configures the Nomad agent's communication with
+  The "consul" block configures the Nomad agent's communication with
   Consul for service discovery and key-value integration. When
   configured, tasks can register themselves with Consul, and the Nomad cluster
   can automatically bootstrap itself.
 ---

-# `consul` Stanza
+# `consul` Block

 <Placement groups={['consul']} />

-The `consul` stanza configures the Nomad agent's communication with
+The `consul` block configures the Nomad agent's communication with
 [Consul][consul] for service discovery and key-value integration. When
 configured, tasks can register themselves with Consul, and the Nomad cluster can
 [automatically bootstrap][bootstrap] itself.
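
As a rough sketch, an explicit `consul` block in an agent configuration might look like the following (the address and join flags shown are illustrative defaults, not a required setup):

```hcl
consul {
  # Address of the local Consul agent; Nomad defaults to 127.0.0.1:8500.
  address = "127.0.0.1:8500"

  # Advertise Nomad services and bootstrap the cluster through Consul.
  auto_advertise   = true
  server_auto_join = true
  client_auto_join = true
}
```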
@@ -25,7 +25,7 @@ consul {
 }
 ```

-A default `consul` stanza is automatically merged with all Nomad agent
+A default `consul` block is automatically merged with all Nomad agent
 configurations. These sane defaults automatically enable Consul integration if
 Consul is detected on the system. This allows for seamless bootstrapping of the
 cluster with zero configuration. To put it another way: if you have a Consul

@@ -82,7 +82,7 @@ testing.
 - `acl` `(`[`ACL`]`: nil)` - Specifies configuration which is specific to ACLs.

 - `addresses` `(Addresses: see below)` - Specifies the bind address for
-  individual network services. Any values configured in this stanza take
+  individual network services. Any values configured in this block take
   precedence over the default [bind_addr](#bind_addr). These values should be
   specified in IP format without a port (ex. `"0.0.0.0"`). To set the port,
   see the [`ports`](#ports) field. The values support [go-sockaddr/template

@@ -104,7 +104,7 @@ testing.
   to the peers of a server or a client node to support more complex network
   configurations such as NAT. This configuration is optional, and defaults to
   the bind address of the specific network service if it is not provided. Any
-  values configured in this stanza take precedence over the default
+  values configured in this block take precedence over the default
   [bind_addr](#bind_addr).

   If the bind address is `0.0.0.0` then the IP address of the default private

@@ -269,8 +269,8 @@ testing.
   This must be an absolute path.

 - `plugin` `(`[`Plugin`]`: nil)` - Specifies configuration for a
-  specific plugin. The plugin stanza may be repeated, once for each plugin being
-  configured. The key of the stanza is the plugin's executable name relative to
+  specific plugin. The plugin block may be repeated, once for each plugin being
+  configured. The key of the block is the plugin's executable name relative to
   the [plugin_dir](#plugin_dir).

 - `ports` `(Port: see below)` - Specifies the network ports used for different

@@ -1,14 +1,14 @@
 ---
 layout: docs
-page_title: plugin Stanza - Agent Configuration
-description: The "plugin" stanza is used to configure a Nomad plugin.
+page_title: plugin Block - Agent Configuration
+description: The "plugin" block is used to configure a Nomad plugin.
 ---

-# `plugin` Stanza
+# `plugin` Block

 <Placement groups={['plugin']} />

-The `plugin` stanza is used to configure plugins.
+The `plugin` block is used to configure plugins.

 ```hcl
 plugin "example-plugin" {

@@ -1,13 +1,13 @@
 ---
 layout: docs
-page_title: search Stanza - Agent Configuration
+page_title: search Block - Agent Configuration
 sidebar_title: search
 description: >-
-  The "search" stanza specifies configuration for the search API provided
+  The "search" block specifies configuration for the search API provided
   by the Nomad servers.
 ---

-# `search` Stanza
+# `search` Block

 <Placement
   groups={[

@@ -15,7 +15,7 @@ description: >-
   ]}
 />

-The `search` stanza specifies configuration for the search API provided by the
+The `search` block specifies configuration for the search API provided by the
 Nomad servers.

 ```hcl

@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: sentinel Stanza - Agent Configuration
+page_title: sentinel Block - Agent Configuration
 description: >-
-  The "sentinel" stanza configures the Nomad agent for Sentinel policies and
+  The "sentinel" block configures the Nomad agent for Sentinel policies and
   tunes various parameters.
 ---

-# `sentinel` Stanza
+# `sentinel` Block

 <Placement groups={['sentinel']} />

-The `sentinel` stanza configures the Sentinel policy engine and tunes various parameters.
+The `sentinel` block configures the Sentinel policy engine and tunes various parameters.

 ```hcl
 sentinel {

@@ -1,17 +1,17 @@
 ---
 layout: docs
-page_title: server Stanza - Agent Configuration
+page_title: server Block - Agent Configuration
 description: |-
-  The "server" stanza configures the Nomad agent to operate in server mode to
+  The "server" block configures the Nomad agent to operate in server mode to
   participate in scheduling decisions, register with service discovery, handle
   join failures, and more.
 ---

-# `server` Stanza
+# `server` Block

 <Placement groups={['server']} />

-The `server` stanza configures the Nomad agent to operate in server mode to
+The `server` block configures the Nomad agent to operate in server mode to
 participate in scheduling decisions, register with service discovery, handle
 join failures, and more.

@@ -252,23 +252,23 @@ server {
   Use `retry_join` with an array as a replacement for `start_join`, **do not use
   both options**. See the [server_join][server-join]
   section for more information on the format of the string. This field is
-  deprecated in favor of the [server_join stanza][server-join].
+  deprecated in favor of the [server_join block][server-join].

 - `retry_interval` `(string: "30s")` - Specifies the time to wait between retry
   join attempts. This field is deprecated in favor of the [server_join
-  stanza][server-join].
+  block][server-join].

 - `retry_max` `(int: 0)` - Specifies the maximum number of join attempts to be
   made before exiting with a return code of 1. By default, this is set to 0
   which is interpreted as infinite retries. This field is deprecated in favor of
-  the [server_join stanza][server-join].
+  the [server_join block][server-join].

 - `start_join` `(array<string>: [])` - Specifies a list of server addresses to
   join on startup. If Nomad is unable to join with any of the specified
   addresses, agent startup will fail. See the [server address
   format](/nomad/docs/configuration/server_join#server-address-format)
   section for more information on the format of the string. This field is
-  deprecated in favor of the [server_join stanza][server-join].
+  deprecated in favor of the [server_join block][server-join].

 ### `plan_rejection_tracker` Parameters

@@ -298,7 +298,7 @@ account.

 ### Common Setup

-This example shows a common Nomad agent `server` configuration stanza. The two
+This example shows a common Nomad agent `server` configuration block. The two
 IP addresses could also be DNS, and should point to the other Nomad servers in
 the cluster.
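
A minimal sketch of such a `server` block (the addresses are placeholders):

```hcl
server {
  enabled          = true
  bootstrap_expect = 3

  server_join {
    # Addresses (or DNS names) of the other Nomad servers in the cluster.
    retry_join = ["10.0.0.11", "10.0.0.12"]
  }
}
```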
@@ -346,7 +346,7 @@ server {

 ### Bootstrapping with a Custom Scheduler Config ((#configuring-scheduler-config))

-While [bootstrapping a cluster], you can use the `default_scheduler_config` stanza
+While [bootstrapping a cluster], you can use the `default_scheduler_config` block
 to prime the cluster with a [`SchedulerConfig`][update-scheduler-config]. The
 scheduler configuration determines which scheduling algorithm is configured—
 spread scheduling or binpacking—and which job types are eligible for preemption.

@@ -1,12 +1,12 @@
 ---
 layout: docs
-page_title: server_join Stanza - Agent Configuration
+page_title: server_join Block - Agent Configuration
 description: >-
-  The "server_join" stanza specifies how the Nomad agent will discover and
+  The "server_join" block specifies how the Nomad agent will discover and
   connect to Nomad servers.
 ---

-# `server_join` Stanza
+# `server_join` Block

 <Placement
   groups={[

@@ -15,7 +15,7 @@ description: >-
   ]}
 />

-The `server_join` stanza specifies how the Nomad agent will discover and connect
+The `server_join` block specifies how the Nomad agent will discover and connect
 to Nomad servers.

 ```hcl

@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: telemetry Stanza - Agent Configuration
+page_title: telemetry Block - Agent Configuration
 description: |-
-  The "telemetry" stanza configures Nomad's publication of metrics and telemetry
+  The "telemetry" block configures Nomad's publication of metrics and telemetry
   to third-party systems.
 ---

-# `telemetry` Stanza
+# `telemetry` Block

 <Placement groups={['telemetry']} />

-The `telemetry` stanza configures Nomad's publication of metrics and telemetry
+The `telemetry` block configures Nomad's publication of metrics and telemetry
 to third-party systems.

 ```hcl

@@ -21,12 +21,12 @@ telemetry {
 ```

 This section of the documentation only covers the configuration options for
-`telemetry` stanza. To understand the architecture and metrics themselves,
+the `telemetry` block. To understand the architecture and metrics themselves,
 please see the [Telemetry guide](/nomad/docs/operations/monitoring-nomad).

 ## `telemetry` Parameters

-Due to the number of configurable parameters to the `telemetry` stanza,
+Due to the number of configurable parameters to the `telemetry` block,
 parameters on this page are grouped by the telemetry provider.

 ### Common

@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: tls Stanza - Agent Configuration
+page_title: tls Block - Agent Configuration
 description: |-
-  The "tls" stanza configures Nomad's TLS communication via HTTP and RPC to
+  The "tls" block configures Nomad's TLS communication via HTTP and RPC to
   enforce secure cluster communication between servers and clients.
 ---

-# `tls` Stanza
+# `tls` Block

 <Placement groups={['tls']} />

-The `tls` stanza configures Nomad's TLS communication via HTTP and RPC to
+The `tls` block configures Nomad's TLS communication via HTTP and RPC to
 enforce secure cluster communication between servers and clients.

 ```hcl

@@ -24,7 +24,7 @@ tls {
 start the Nomad agent.

 This section of the documentation only covers the configuration options for
-`tls` stanza. To understand how to setup the certificates themselves, please see
+the `tls` block. To understand how to set up the certificates themselves, please see
 the [Enable TLS Encryption for Nomad Tutorial](/nomad/tutorials/transport-security/security-enable-tls).

 ## `tls` Parameters

@@ -79,8 +79,8 @@ the [Enable TLS Encryption for Nomad Tutorial](/nomad/tutorials/transport-securi

 ## `tls` Examples

-The following examples only show the `tls` stanzas. Remember that the
-`tls` stanza is only valid in the placements listed above.
+The following examples only show the `tls` blocks. Remember that the
+`tls` block is only valid in the placements listed above.

 ### Enabling TLS
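
A minimal `tls` block enabling TLS for both HTTP and RPC might look like this sketch (the certificate paths are hypothetical):

```hcl
tls {
  http = true
  rpc  = true

  ca_file   = "/etc/nomad.d/tls/ca.pem"
  cert_file = "/etc/nomad.d/tls/nomad.pem"
  key_file  = "/etc/nomad.d/tls/nomad-key.pem"

  # Require server certificates to carry a valid server hostname.
  verify_server_hostname = true
}
```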
@@ -1,16 +1,16 @@
 ---
 layout: docs
-page_title: ui Stanza - Agent Configuration
+page_title: ui Block - Agent Configuration
 description: |-
-  The "ui" stanza configures the Nomad agent's web UI.
+  The "ui" block configures the Nomad agent's web UI.

 ---

-# `ui` Stanza
+# `ui` Block

 <Placement groups={['ui']} />

-The `ui` stanza configures the Nomad agent's [web UI].
+The `ui` block configures the Nomad agent's [web UI].

 ```hcl
 ui {

@@ -26,7 +26,7 @@ ui {
 }
 ```

-A default `ui` stanza is automatically merged with all Nomad agent
+A default `ui` block is automatically merged with all Nomad agent
 configurations. Note that the UI can be served from any Nomad agent,
 and the configuration is individual to each agent.

 ## `ui` Parameters

@@ -1,17 +1,17 @@
 ---
 layout: docs
-page_title: vault Stanza - Agent Configuration
+page_title: vault Block - Agent Configuration
 description: |-
-  The "vault" stanza configures Nomad's integration with HashiCorp's Vault.
+  The "vault" block configures Nomad's integration with HashiCorp's Vault.
   When configured, Nomad can create and distribute Vault tokens to tasks
   automatically.
 ---

-# `vault` Stanza
+# `vault` Block

 <Placement groups={['vault']} />

-The `vault` stanza configures Nomad's integration with [HashiCorp's
+The `vault` block configures Nomad's integration with [HashiCorp's
 Vault][vault]. When configured, Nomad can create and distribute Vault tokens to
 tasks automatically. For more information on the architecture and setup, please
 see the [Nomad and Vault integration documentation][nomad-vault].

@@ -96,8 +96,8 @@ vault {

 ## `vault` Examples

-The following examples only show the `vault` stanzas. Remember that the
-`vault` stanza is only valid in the placements listed above.
+The following examples only show the `vault` blocks. Remember that the
+`vault` block is only valid in the placements listed above.

 ### Nomad Server
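
A hedged sketch of a server-side `vault` block (the address and role name are placeholders; the token itself is normally supplied out of band, for example via the `VAULT_TOKEN` environment variable):

```hcl
vault {
  enabled          = true
  address          = "https://vault.service.consul:8200"
  create_from_role = "nomad-cluster"
}
```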
@@ -71,7 +71,7 @@ The `docker` driver supports the following configuration in the job spec. Only

 - `auth_soft_fail` `(bool: false)` - Don't fail the task on an auth failure.
   Attempt to continue without auth. If the Nomad client configuration has an
-  [`auth.helper`](#plugin_auth_helper) stanza, the helper will be tried for
+  [`auth.helper`](#plugin_auth_helper) block, the helper will be tried for
   all images, including public images. If you mix private and public images,
   you will need to include `auth_soft_fail=true` in every job using a public
   image.
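
For illustration, a task using a public image alongside a configured auth helper might set the flag like this (the image and task names are arbitrary):

```hcl
task "cache" {
  driver = "docker"

  config {
    image = "redis:7" # public image; no registry credentials required

    # Without this, a failing auth helper lookup would fail the task.
    auth_soft_fail = true
  }
}
```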
@@ -292,7 +292,7 @@ config {
   group-wide bridge networking, you may encounter issues preventing your
   containers from reaching networks outside of the bridge interface on systems with
   firewalld enabled. This behavior is often caused by the CNI plugin not registering the group
-  network as trusted and can be resolved as described in the [network stanza] documentation.
+  network as trusted and can be resolved as described in the [network block] documentation.

 - `pid_mode` - (Optional) `host` or not set (default). Set to `host` to share
   the PID namespace with the host. Note that this also requires the Nomad agent

@@ -744,12 +744,12 @@ expose and port forwarding.

 #### Deprecated `port_map` Syntax

-Up until Nomad 0.12, ports could be specified in a task's resource stanza and set using the docker
+Up until Nomad 0.12, ports could be specified in a task's resource block and set using the docker
 `port_map` field. As more features have been added to the group network resource allocation, task based
 network resources are deprecated. With it the `port_map` field is also deprecated and can only be used
 with task network resources.

-Users should migrate their jobs to define ports in the group network stanza and specified which ports
+Users should migrate their jobs to define ports in the group network block and specify which ports
 a task maps with the `ports` field.
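
A sketch of the migrated form, with the port defined on the group network and mapped by the task (names and values are illustrative):

```hcl
group "web" {
  network {
    port "http" {
      to = 8080 # container port
    }
  }

  task "server" {
    driver = "docker"

    config {
      image = "nginx:1.25"
      ports = ["http"] # replaces the deprecated port_map field
    }
  }
}
```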

 ### Advertising Container IPs

@@ -807,7 +807,7 @@ For the best performance and security features you should use recent versions
 of the Linux Kernel and Docker daemon.

 If you would like to change any of the options related to the `docker` driver
-on a Nomad client, you can modify them with the [plugin stanza][plugin-stanza]
+on a Nomad client, you can modify them with the [plugin block][plugin-block]
 syntax. Below is an example of a configuration (many of the values are the
 default). See the next section for more information on the options.

@@ -897,7 +897,7 @@ host system.
 - `allow_runtimes` - defaults to `["runc", "nvidia"]` - A list of the allowed
   docker runtimes a task may use.

-- `auth` stanza:
+- `auth` block:

   - `config`<a id="plugin_auth_file"></a> - Allows an operator to specify a
     JSON file which is in the dockercfg format containing authentication

@@ -914,7 +914,7 @@ host system.
     public images. If you mix private and public images, you will need to
     include [`auth_soft_fail=true`] in every job using a public image.

-- `tls` stanza:
+- `tls` block:

   - `cert` - Path to the server's certificate file (`.pem`). Specify this
     along with `key` and `ca` to use a TLS client to connect to the docker

@@ -937,7 +937,7 @@ host system.
   Available options are `job_name`, `job_id`, `task_group_name`, `task_name`,
   `namespace`, `node_name`, `node_id`. Globs are supported (e.g. `task*`)

-- `logging` stanza:
+- `logging` block:

   - `type` - Defaults to `"json-file"`. Specifies the logging driver docker
     should use for all containers Nomad starts. Note that for older versions

@@ -950,7 +950,7 @@ host system.
     [configuration](https://docs.docker.com/config/containers/logging/configure/)
     to the logging driver.

-- `gc` stanza:
+- `gc` block:

   - `image` - Defaults to `true`. Changing this to `false` will prevent Nomad
     from removing images from stopped tasks.

@@ -965,7 +965,7 @@ host system.
     from removing a container when the task exits. Under a name conflict,
     Nomad may still remove the dead container.

-  - `dangling_containers` stanza for controlling dangling container detection
+  - `dangling_containers` block for controlling dangling container detection
     and cleanup:

     - `enabled` - Defaults to `true`. Enables dangling container handling.

@@ -982,7 +982,7 @@ host system.
       GC. Should not need adjusting higher but may be adjusted lower to GC
       more aggressively.

-- `volumes` stanza:
+- `volumes` block:

   - `enabled` - Defaults to `false`. Allows tasks to bind host paths
     (`volumes`) inside their container and use volume drivers

@@ -1006,7 +1006,7 @@ host system.
 ## Client Configuration

 ~> Note: client configuration options will soon be deprecated. Please use
-[plugin options][plugin-options] instead. See the [plugin stanza][plugin-stanza]
+[plugin options][plugin-options] instead. See the [plugin block][plugin-block]
 documentation for more information.

 The `docker` driver has the following [client configuration

@@ -1186,7 +1186,7 @@ Containers that don't match Nomad container patterns are left untouched.

 Operators can run the reaper in a dry-run mode, where it only logs dangling
 container ids without killing them, or disable it by setting the
-`gc.dangling_containers` config stanza.
+`gc.dangling_containers` config block.
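
A sketch of such a plugin configuration, running the reaper in dry-run mode (values are illustrative):

```hcl
plugin "docker" {
  config {
    gc {
      dangling_containers {
        enabled = true
        dry_run = true  # log dangling container ids instead of killing them
        period  = "5m"
      }
    }
  }
}
```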

 ### Docker for Windows

@@ -1197,7 +1197,7 @@ Windows is relatively new and rapidly evolving you may want to consult the
 [faq-win-mac]: /nomad/docs/faq#q-how-to-connect-to-my-host-network-when-using-docker-desktop-windows-and-macos
 [winissues]: https://github.com/hashicorp/nomad/issues?q=is%3Aopen+is%3Aissue+label%3Atheme%2Fdriver%2Fdocker+label%3Atheme%2Fplatform-windows
 [plugin-options]: #plugin-options
-[plugin-stanza]: /nomad/docs/configuration/plugin
+[plugin-block]: /nomad/docs/configuration/plugin
 [allocation working directory]: /nomad/docs/runtime/environment#task-directories 'Task Directories'
 [`auth_soft_fail=true`]: #auth_soft_fail
 [cap_add]: /nomad/docs/drivers/docker#cap_add

@@ -1209,6 +1209,6 @@ Windows is relatively new and rapidly evolving you may want to consult the
 [allow_caps]: /nomad/docs/drivers/docker#allow_caps
 [Connect]: /nomad/docs/job-specification/connect
 [`bridge`]: /nomad/docs/job-specification/network#bridge
-[network stanza]: /nomad/docs/job-specification/network#bridge-mode
+[network block]: /nomad/docs/job-specification/network#bridge-mode
 [`pids_limit`]: /nomad/docs/drivers/docker#pids_limit
 [Windows isolation]: https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container

@@ -253,5 +253,5 @@ This list is configurable through the agent client
 [no_net_raw]: /nomad/docs/upgrade/upgrade-specific#nomad-1-1-0-rc1-1-0-5-0-12-12
 [allow_caps]: /nomad/docs/drivers/exec#allow_caps
 [docker_caps]: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
-[host volume]: /nomad/docs/configuration/client#host_volume-stanza
+[host volume]: /nomad/docs/configuration/client#host_volume-block
 [volume_mount]: /nomad/docs/job-specification/volume_mount

@@ -12,8 +12,8 @@ support a broad set of workloads across all major operating systems.

 Starting with Nomad 0.9, task drivers are now pluggable. This gives users the
 flexibility to introduce their own drivers without having to recompile Nomad.
-You can view the [plugin stanza][plugin] documentation for examples on how to
-use the `plugin` stanza in Nomad's client configuration. Note that we have
+You can view the [plugin block][plugin] documentation for examples on how to
+use the `plugin` block in Nomad's client configuration. Note that we have
 introduced new syntax when specifying driver options in the client configuration
 (see [docker][docker_plugin] for an example). Keep in mind that even though all
 built-in drivers are now plugins, Nomad remains a single binary and maintains

@@ -99,7 +99,7 @@ plugin "raw_exec" {
 ```

 Nomad versions before v0.9 use the following client configuration. This configuration is
-also supported in Nomad v0.9.0, but is deprecated in favor of the plugin stanza:
+also supported in Nomad v0.9.0, but is deprecated in favor of the plugin block:

 ```
 client {

@@ -125,7 +125,7 @@ client {
 ## Client Options

 ~> Note: client configuration options will soon be deprecated. Please use
-[plugin options][plugin-options] instead. See the [plugin stanza][plugin-stanza] documentation for more information.
+[plugin options][plugin-options] instead. See the [plugin block][plugin-block] documentation for more information.

 - `driver.raw_exec.enable` - Specifies whether the driver should be enabled or
   disabled. Defaults to `false`.

@@ -154,4 +154,4 @@ appropriate privileges, the cgroup system is mounted and the operator hasn't
 disabled cgroups for the driver.

 [plugin-options]: #plugin-options
-[plugin-stanza]: /nomad/docs/configuration/plugin
+[plugin-block]: /nomad/docs/configuration/plugin

@@ -269,7 +269,7 @@ The API service is defined as a task group with a bridge network:
 ```

 Since the API service is only accessible via Consul service mesh, it does not define
-any ports in its network. The service stanza enables service mesh.
+any ports in its network. The service block enables service mesh.

 ```hcl
 group "api" {

@@ -290,7 +290,7 @@ any ports in its network. The service stanza enables service mesh.
 }
 ```

-The `port` in the service stanza is the port the API service listens on. The
+The `port` in the service block is the port the API service listens on. The
 Envoy proxy will automatically route traffic to that port inside the network
 namespace. Note that currently this cannot be a named port; it must be a
 hard-coded port value. See [GH-9907].
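
A hedged sketch of such a service block, abbreviated from the surrounding `count-api` scenario:

```hcl
service {
  name = "count-api"
  port = "9001" # must be a hard-coded port, not a named port

  connect {
    sidecar_service {}
  }
}
```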
@@ -345,7 +345,7 @@ The web frontend connects to the API service via Consul service mesh.
 }
 ```

-The `upstreams` stanza defines the remote service to access (`count-api`) and
+The `upstreams` block defines the remote service to access (`count-api`) and
 what port to expose that service on inside the network namespace (`8080`).

 The web frontend is configured to communicate with the API service with an

@@ -43,12 +43,12 @@ To configure a job to register with service discovery, please see the

 ## Dynamic Configuration

-Nomad's job specification includes a [`template` stanza](/nomad/docs/job-specification/template)
+Nomad's job specification includes a [`template` block](/nomad/docs/job-specification/template)
 that utilizes a Consul ecosystem tool called [Consul Template](https://github.com/hashicorp/consul-template). This mechanism creates a convenient way to ship configuration files
 that are populated from environment variables, Consul data, Vault secrets, or just
 general configurations within a Nomad task.
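
For instance, a small `template` block rendering a Consul key into a task-local file might look like this sketch (the key and paths are hypothetical):

```hcl
template {
  data        = "greeting = {{ key \"app/config/greeting\" }}"
  destination = "local/app.conf"
  change_mode = "restart" # restart the task when the rendered file changes
}
```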

-For more information on Nomad's template stanza and how it leverages Consul Template,
+For more information on Nomad's template block and how it leverages Consul Template,
 please see the [`template` job specification documentation](/nomad/docs/job-specification/template).

 ## Consul Namespaces

@@ -275,7 +275,7 @@ More information about creating orphan tokens can be found in
 The [`-period` flag](/vault/docs/commands/token/create#period) is required to allow the automatic renewal of the token. If this is left out, a [`vault token renew` command](/vault/docs/commands/token/renew) will need to be run manually to renew the token.

 The token can then be set in the server configuration's
-[`vault` stanza][config], as a command-line flag, or via an environment
+[`vault` block][config], as a command-line flag, or via an environment
 variable.

 ```shell-session

@@ -318,7 +318,7 @@ but will log the reasons the token is invalid and disable Vault integration.

 ### Permission Denied errors

-If you are using a Vault version less than 0.7.1 with a Nomad version greater than or equal to 0.6.1, you will need to update your task's policy (listed in [the `vault` stanza of the job specification][vault-spec]) to add the following:
+If you are using a Vault version less than 0.7.1 with a Nomad version greater than or equal to 0.6.1, you will need to update your task's policy (listed in [the `vault` block of the job specification][vault-spec]) to add the following:

 ```hcl
 path "sys/leases/renew" {

@@ -1,13 +1,13 @@
 ---
 layout: docs
-page_title: affinity Stanza - Job Specification
+page_title: affinity Block - Job Specification
 description: |-
-  The "affinity" stanza allows restricting the set of eligible nodes.
+  The "affinity" block allows restricting the set of eligible nodes.
   Affinities may filter on attributes or metadata. Additionally affinities may
   be specified at the job, group, or task levels for ultimate flexibility.
 ---

-# `affinity` Stanza
+# `affinity` Block

 <Placement
   groups={[

@@ -17,7 +17,7 @@ description: |-
   ]}
 />

-The `affinity` stanza allows operators to express placement preference for a set of nodes. Affinities may
+The `affinity` block allows operators to express placement preference for a set of nodes. Affinities may
 be expressed on [attributes][interpolation] or [client metadata][client-meta].
 Additionally affinities may be specified at the [job][job], [group][group], or
 [task][task] levels for ultimate flexibility.

@@ -52,7 +52,7 @@ job "docs" {
 }
 ```

-Affinities apply to task groups but may be specified within job and task stanzas as well.
+Affinities apply to task groups but may be specified within job and task blocks as well.
 Job affinities apply to all groups within the job. Task affinities apply to the whole task group
 that the task is a part of.
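
A minimal `affinity` block expressing such a preference might look like this sketch (the attribute and value are illustrative):

```hcl
affinity {
  attribute = "${node.datacenter}"
  value     = "us-west-1"
  weight    = 75 # soft preference; higher weights are favored, not required
}
```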
@@ -170,8 +170,8 @@ affinity {

 ## `affinity` Examples

-The following examples only show the `affinity` stanzas. Remember that the
-`affinity` stanza is only valid in the placements listed above.
+The following examples only show the `affinity` blocks. Remember that the
+`affinity` block is only valid in the placements listed above.

 ### Kernel Data

@@ -275,4 +275,4 @@ The placement score is affected by the following factors.
   of a job on the same node.
 - `node-reschedule-penalty` - Used when the job is being rescheduled. Nomad adds a penalty to avoid placing the job on a node where
   it has failed to run before.
-- `node-affinity` - Used when the criteria specified in the `affinity` stanza matches the node.
+- `node-affinity` - Used when the criteria specified in the `affinity` block matches the node.

@@ -1,17 +1,17 @@
 ---
 layout: docs
-page_title: artifact Stanza - Job Specification
+page_title: artifact Block - Job Specification
 description: |-
-  The "artifact" stanza instructs Nomad to fetch and unpack a remote resource,
+  The "artifact" block instructs Nomad to fetch and unpack a remote resource,
   such as a file, tarball, or binary, and permits downloading artifacts from a
   variety of locations using a URL as the input source.
 ---

-# `artifact` Stanza
+# `artifact` Block

 <Placement groups={['job', 'group', 'task', 'artifact']} />

-The `artifact` stanza instructs Nomad to fetch and unpack a remote resource,
+The `artifact` block instructs Nomad to fetch and unpack a remote resource,
 such as a file, tarball, or binary. Nomad downloads artifacts using the popular
 [`go-getter`][go-getter] library, which permits downloading artifacts from a
 variety of locations using a URL as the input source.

@@ -73,8 +73,8 @@ interrupted and fail to start. Refer to the task events for more information.

 ## `artifact` Examples

-The following examples only show the `artifact` stanzas. Remember that the
-`artifact` stanza is only valid in the placements listed above.
+The following examples only show the `artifact` blocks. Remember that the
+`artifact` block is only valid in the placements listed above.

 ### Download File
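
A minimal sketch of an `artifact` block fetching a tarball (the URL is a placeholder):

```hcl
artifact {
  source      = "https://example.com/file.tar.gz"
  destination = "local/some-directory" # unpacked relative to the task directory
}
```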
@@ -1,15 +1,15 @@
 ---
 layout: docs
-page_title: change_script Stanza - Job Specification
-description: The "change_script" stanza configures a script to be run on template re-render.
+page_title: change_script Block - Job Specification
+description: The "change_script" block configures a script to be run on template re-render.
 ---

-# `change_script` Stanza
+# `change_script` Block

 <Placement groups={['job', 'group', 'task', 'template', 'change_script']} />

-The `change_script` stanza allows operators to configure scripts that
-will be executed on template change. This stanza is only used when template
+The `change_script` block allows operators to configure scripts that
+will be executed on template change. This block is only used when template
 `change_mode` is set to `script`.

 ```hcl

@@ -53,7 +53,7 @@ job "docs" {
 ### Template as a script example

 Below is an example of how a script can be embedded in a `data` block of another
-`template` stanza:
+`template` block:

 ```hcl
 job "docs" {

@@ -5,7 +5,7 @@ description: |-
   The "check" block declares a service check definition for a service registered into the Nomad or Consul service provider.
 ---

-# `check` Stanza
+# `check` Block

 <Placement
   groups={[

@@ -63,7 +63,7 @@ job "example" {
 - `args` `(array<string>: [])` - Specifies additional arguments to the
   `command`. This only applies to script-based health checks.

-- `check_restart` - See [`check_restart` stanza][check_restart_stanza].
+- `check_restart` - See [`check_restart` block][check_restart_block].

 - `command` `(string: <varies>)` - Specifies the command to run for performing
   the health check. The script must exit: 0 for passing, 1 for warning, or any

@@ -123,7 +123,7 @@ job "example" {
 - `port` `(string: <varies>)` - Specifies the label of the port on which the
   check will be performed. Note this is the _label_ of the port and not the port
   number unless `address_mode = driver`. The port label must match one defined
-  in the [`network`][network] stanza. If a port value was declared on the
+  in the [`network`][network] block. If a port value was declared on the
   `service`, this will inherit from that value if not supplied. If supplied,
   this value takes precedence over the `service.port` value. This is useful for
   services which operate on multiple ports. `grpc`, `http`, and `tcp` checks

@@ -136,7 +136,7 @@ job "example" {

 - `task` `(string: "")` - Specifies the task associated with this
   check. Scripts are executed within the task's environment, and
-  `check_restart` stanzas will apply to the specified task. Inherits
+  `check_restart` blocks will apply to the specified task. Inherits
   the [`service.task`][service_task] value if not set. Must be unset
   or equivalent to `service.task` in task-level services.

@@ -177,14 +177,14 @@ job "example" {
 - `ignore` - Any status will be treated as healthy.

 ~> **Caveat:** `on_update` is only compatible with certain
-  [`check_restart`][check_restart_stanza] configurations. `on_update = "ignore_warnings"` requires that `check_restart.ignore_warnings = true`.
+  [`check_restart`][check_restart_block] configurations. `on_update = "ignore_warnings"` requires that `check_restart.ignore_warnings = true`.
   `check_restart` can however specify `ignore_warnings = true` with `on_update = "require_healthy"`. If `on_update` is set to `ignore`, `check_restart` must
   be omitted entirely.

-#### `header` Stanza
+#### `header` Block

-HTTP checks may include a `header` stanza to set HTTP headers. The `header`
-stanza parameters have lists of strings as values. Multiple values will cause
+HTTP checks may include a `header` block to set HTTP headers. The `header`
+block parameters have lists of strings as values. Multiple values will cause
 the header to be set multiple times, once for each value.

 ```hcl

@@ -285,7 +285,7 @@ service {
 ```

 In this example Consul would health check the `example.Service` service on the
-`rpc` port defined in the task's [network resources][network] stanza. See
+`rpc` port defined in the task's [network resources][network] block. See
 [Using Driver Address Mode](#using-driver-address-mode) for details on address
 selection.

@@ -434,7 +434,7 @@ Output = nomad: Get "http://:9999/": dial tcp :9999: connect: connection re
   does not have access to the file system of a task for that driver.
 </small>

-[check_restart_stanza]: /nomad/docs/job-specification/check_restart
+[check_restart_block]: /nomad/docs/job-specification/check_restart
 [consul_passfail]: /consul/docs/discovery/checks#success-failures-before-passing-critical
 [network]: /nomad/docs/job-specification/network 'Nomad network Job Specification'
 [service]: /nomad/docs/job-specification/service

@@ -1,12 +1,12 @@
 ---
 layout: docs
-page_title: check_restart Stanza - Job Specification
+page_title: check_restart Block - Job Specification
 description: |-
-  The "check_restart" stanza instructs Nomad when to restart tasks with
+  The "check_restart" block instructs Nomad when to restart tasks with
   unhealthy service checks.
 ---

-# `check_restart` Stanza
+# `check_restart` Block

 <Placement
   groups={[

@@ -15,13 +15,13 @@ description: |-
   ]}
 />

-The `check_restart` stanza instructs Nomad when to restart tasks with unhealthy
+The `check_restart` block instructs Nomad when to restart tasks with unhealthy
 service checks. When a health check in Nomad or Consul has been unhealthy for the `limit`
-specified in a `check_restart` stanza, it is restarted according to the task group's
-[`restart` policy][restart_stanza]. The `check_restart` settings apply to
-[`check`s][check_stanza], but may also be placed on [`service`s][service_stanza]
+specified in a `check_restart` block, it is restarted according to the task group's
+[`restart` policy][restart_block]. The `check_restart` settings apply to
+[`check`s][check_block], but may also be placed on [`service`s][service_block]
 to apply to all checks on a service. If `check_restart` is set on both the check
-and service, the stanzas are merged with the check values taking precedence.
+and service, the blocks are merged with the check values taking precedence.

 ```hcl
 job "mysql" {

@@ -105,7 +105,7 @@ check_restart {

 After the grace period if the script check fails, it has 180 seconds (`60s interval * 3 limit`)
 to pass before a restart is triggered. Once a restart is triggered the task group's
-[`restart` policy][restart_stanza] takes control:
+[`restart` policy][restart_block] takes control:

 ```hcl
 restart {

@@ -115,7 +115,7 @@ restart {
 }
 ```

-The [`restart` stanza][restart_stanza] controls the restart behavior of the
+The [`restart` block][restart_block] controls the restart behavior of the
 task. In this case it will stop the task and then wait 10 seconds before
 starting it again.
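
Combining the two, a hedged sketch of a `check_restart` block matching the timing described above:

```hcl
check_restart {
  limit = 3     # restart after 3 consecutive unhealthy checks
  grace = "90s" # wait after task start before counting failures
}
```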
@@ -134,10 +134,10 @@ restart {
 If the check continues to fail, the task will be restarted up to `attempts`
 times within an `interval`. If the `restart` attempts are reached within the
 `limit` then the `mode` controls the behavior. In this case the task would fail
-and not be restarted again. See the [`restart` stanza][restart_stanza] for
+and not be restarted again. See the [`restart` block][restart_block] for
 details.

-[check_stanza]: /nomad/docs/job-specification/service#check-parameters 'check stanza'
+[check_block]: /nomad/docs/job-specification/service#check-parameters 'check block'
 [gh-9176]: https://github.com/hashicorp/nomad/issues/9176
-[restart_stanza]: /nomad/docs/job-specification/restart 'restart stanza'
-[service_stanza]: /nomad/docs/job-specification/service 'service stanza'
+[restart_block]: /nomad/docs/job-specification/restart 'restart block'
+[service_block]: /nomad/docs/job-specification/service 'service block'
Some files were not shown because too many files have changed in this diff.