2015-09-21 00:06:02 +00:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-09-23 00:06:23 +00:00
|
|
|
"io/ioutil"
|
2015-09-21 00:06:02 +00:00
|
|
|
"os"
|
2015-09-23 00:06:23 +00:00
|
|
|
"strings"
|
|
|
|
)
|
|
|
|
|
|
|
|
// DefaultInitName is the default name we use when
// initializing the example file.
const DefaultInitName = "example.nomad"
|
|
|
|
|
2018-03-21 00:37:28 +00:00
|
|
|
// JobInitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type JobInitCommand struct {
	Meta // embedded CLI plumbing (Ui, FlagSet helpers) shared by all commands
}
|
|
|
|
|
2018-03-21 00:37:28 +00:00
|
|
|
func (c *JobInitCommand) Help() string {
|
2015-09-23 00:06:23 +00:00
|
|
|
helpText := `
|
2018-03-21 00:37:28 +00:00
|
|
|
Usage: nomad job init
|
2018-03-21 01:28:14 +00:00
|
|
|
Alias: nomad init
|
2015-09-23 00:06:23 +00:00
|
|
|
|
|
|
|
Creates an example job file that can be used as a starting
|
|
|
|
point to customize further.
|
2018-05-01 16:51:13 +00:00
|
|
|
|
|
|
|
Init Options:
|
|
|
|
|
|
|
|
-short
|
|
|
|
If the short flag is set, a minimal jobspec without comments is emitted.
|
2015-09-23 00:06:23 +00:00
|
|
|
`
|
|
|
|
return strings.TrimSpace(helpText)
|
2015-09-21 00:06:02 +00:00
|
|
|
}
|
|
|
|
|
2018-03-21 00:37:28 +00:00
|
|
|
func (c *JobInitCommand) Synopsis() string {
|
2015-09-23 00:06:23 +00:00
|
|
|
return "Create an example job file"
|
|
|
|
}
|
2015-09-21 00:06:02 +00:00
|
|
|
|
2018-04-18 16:02:11 +00:00
|
|
|
// Name returns the qualified command name ("job init") used when building the
// flag set and in error text.
func (c *JobInitCommand) Name() string { return "job init" }
|
|
|
|
|
2018-03-21 00:37:28 +00:00
|
|
|
func (c *JobInitCommand) Run(args []string) int {
|
2018-05-01 16:51:13 +00:00
|
|
|
var short bool
|
|
|
|
|
|
|
|
flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
|
|
|
|
flags.Usage = func() { c.Ui.Output(c.Help()) }
|
|
|
|
flags.BoolVar(&short, "short", false, "")
|
|
|
|
|
|
|
|
if err := flags.Parse(args); err != nil {
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2015-09-30 21:21:50 +00:00
|
|
|
// Check for misuse
|
2018-05-01 16:51:13 +00:00
|
|
|
if len(flags.Args()) != 0 {
|
2018-04-18 16:02:11 +00:00
|
|
|
c.Ui.Error("This command takes no arguments")
|
|
|
|
c.Ui.Error(commandErrorText(c))
|
2015-09-30 21:21:50 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2015-09-23 00:06:23 +00:00
|
|
|
// Check if the file already exists
|
|
|
|
_, err := os.Stat(DefaultInitName)
|
2015-09-30 21:21:50 +00:00
|
|
|
if err != nil && !os.IsNotExist(err) {
|
2015-09-23 00:06:23 +00:00
|
|
|
c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
|
2015-09-21 00:06:02 +00:00
|
|
|
return 1
|
|
|
|
}
|
2015-09-30 21:21:50 +00:00
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
|
|
|
|
return 1
|
|
|
|
}
|
2015-09-21 00:06:02 +00:00
|
|
|
|
2018-05-01 16:51:13 +00:00
|
|
|
var jobSpec []byte
|
|
|
|
|
|
|
|
if short {
|
|
|
|
jobSpec = []byte(shortJob)
|
|
|
|
} else {
|
|
|
|
jobSpec = []byte(defaultJob)
|
|
|
|
}
|
|
|
|
|
2015-09-23 00:06:23 +00:00
|
|
|
// Write out the example
|
2018-05-01 16:51:13 +00:00
|
|
|
err = ioutil.WriteFile(DefaultInitName, jobSpec, 0660)
|
2015-09-21 00:06:02 +00:00
|
|
|
if err != nil {
|
2015-09-23 00:06:23 +00:00
|
|
|
c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
|
2015-09-21 00:06:02 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2015-09-23 00:06:23 +00:00
|
|
|
// Success
|
|
|
|
c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
|
2015-09-21 00:06:02 +00:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2018-05-01 16:51:13 +00:00
|
|
|
// shortJob is the minimal, comment-free example jobspec emitted when the
// -short flag is passed to "nomad job init".
var shortJob = strings.TrimSpace(`
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    ephemeral_disk {
      size = 300
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      resources {
        cpu = 500
        memory = 256
        network {
          mbits = 10
          port "db" {}
        }
      }

      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name = "alive"
          type = "tcp"
          interval = "10s"
          timeout = "2s"
        }
      }
    }
  }
}
`)
|
|
|
|
|
2015-09-30 21:21:50 +00:00
|
|
|
// defaultJob is the fully commented example jobspec that "nomad job init"
// writes by default.
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
  # The "region" parameter specifies the region in which to execute the job. If
  # omitted, this inherits the default region name of "global".
  # region = "global"

  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  # https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"

  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  # https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The update
  # strategy is used to control things like rolling upgrades, canaries, and
  # blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group. When placed at the
  # job, it applies to all groups within the job. When placed at both the job and
  # group level, the stanzas are merged with the group's taking precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  # https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline in which the
    # allocation must be marked as healthy after which the allocation is
    # automatically transitioned to unhealthy. Transitioning to unhealthy will
    # fail the deployment and potentially roll back the job if "auto_revert" is
    # set to true.
    healthy_deadline = "3m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed and upon promotion the old version is stopped.
    canary = 0
  }

  # The migrate stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  # https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of task groups that can be migrated at the same
    # time. This number must be less than the total count for the group as
    # (count - max_parallel) will be left running during migrations.
    max_parallel = 1

    # Specifies the mechanism in which allocations health is determined. The
    # potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline in which the allocation must be marked as healthy
    # after which the allocation is automatically transitioned to unhealthy. This
    # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }

  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  # https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "cache" {
    # The "count" parameter specifies the number of the task groups that should
    # be running under this group. This value must be non-negative and defaults
    # to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    # https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to run the job within the specified interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

      # The "mode" parameter controls what happens when a task has restarted
      # "attempts" times within the interval. "delay" mode delays the next
      # restart until the next interval. "fail" mode does not restart the task
      # if "attempts" has been hit within the interval.
      mode = "fail"
    }

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Clients using this stanza should
    # not specify disk requirements in the resources stanza of the task. All
    # tasks in this group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    # https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true results in the allocation directory of a
      # sticky allocation directory to be migrated.
      # migrate = true

      # The "size" parameter specifies the size in MB of shared ephemeral disk
      # between tasks in the group.
      size = 300
    }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    # https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "redis" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      # https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those logs files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      # https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      # https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu = 500 # 500 MHz
        memory = 256 # 256MB
        network {
          mbits = 10
          port "db" {}
        }
      }

      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      # https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name = "alive"
          type = "tcp"
          interval = "10s"
          timeout = "2s"
        }
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      # https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination = "local/file.yml"
      #   change_mode = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "template" stanza can also be used to create environment variables
      # for tasks that prefer those to config files. The task will be restarted
      # when data pulled from Consul or Vault changes.
      #
      # template {
      #   data = "KEY={{ key \"service/my-key\" }}"
      #   destination = "local/file.env"
      #   env = true
      # }

      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # The token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      # https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies = ["cdn", "frontend"]
      #   change_mode = "signal"
      #   change_signal = "SIGHUP"
      # }

      # Controls the timeout between signalling a task it will be killed
      # and killing the task. If not set a default is used.
      # kill_timeout = "20s"
    }
  }
}
`)
|