Update the sample job file syntax

- Uses spaces instead of tabs for the sample job file. Even though Nomad
  is written in Go, the HCL style guide uses two spaces for indentation,
  and this will match Terraform and Vault in terms of configuration.
- Locks to redis:3.2 instead of floating latest. Running latest in
  production is a bad idea and we shouldn't encourage it even via
  example.
- Removes the linux constraint (native docker exists for both Mac and
  Windows now)
- Aligns equal signs and attribute assignments as per the HCL formatter

- Closes #1743
This commit is contained in:
Seth Vargo 2016-09-24 14:42:25 -04:00
parent 18c657a1b7
commit 4523f112fe
No known key found for this signature in database
GPG Key ID: 905A90C2949E8787
2 changed files with 117 additions and 110 deletions

View File

@ -64,132 +64,130 @@ func (c *InitCommand) Run(args []string) int {
}
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file.
# Create a job with ID and Name 'example'
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".
job "example" {
# Run the job in the global region, which is the default.
# region = "global"
# Run the job in the global region, which is the default.
# region = "global"
# Specify the datacenters within the region this job can run in.
datacenters = ["dc1"]
# Specify the datacenters within the region this job can run in.
datacenters = ["dc1"]
# Service type jobs optimize for long-lived services. This is
# the default but we can change to batch for short-lived tasks.
# type = "service"
# Service type jobs optimize for long-lived services. This is
# the default but we can change to batch for short-lived tasks.
# type = "service"
# Priority controls our access to resources and scheduling priority.
# This can be 1 to 100, inclusively, and defaults to 50.
# priority = 50
# Priority controls our access to resources and scheduling priority.
# This can be 1 to 100, inclusively, and defaults to 50.
# priority = 50
# Restrict our job to only linux. We can specify multiple
# constraints as needed.
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# Restrict our job to only linux. We can specify multiple constraints
# as needed.
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# Configure the job to do rolling updates
update {
# Stagger updates every 10 seconds
stagger = "10s"
# Configure the job to do rolling updates
update {
# Stagger updates every 10 seconds
stagger = "10s"
# Update a single task at a time
max_parallel = 1
}
# Update a single task at a time
max_parallel = 1
}
# Create a 'cache' group. Each task in the group will be
# scheduled onto the same machine.
group "cache" {
# Control the number of instances of this group.
# Defaults to 1
# count = 1
# Create a 'cache' group. Each task in the group will be scheduled
# onto the same machine.
group "cache" {
# Control the number of instances of this group. Defaults to 1.
# count = 1
# Configure the restart policy for the task group. If not provided, a
# default is used based on the job type.
restart {
# The number of attempts to run the job within the specified interval.
attempts = 10
interval = "5m"
# A delay between a task failing and a restart occurring.
delay = "25s"
# Configure the restart policy for the task group. If not provided, a
# default is used based on the job type.
restart {
# The number of attempts to run the job within the specified interval.
attempts = 10
interval = "5m"
# Mode controls what happens when a task has restarted "attempts"
# times within the interval. "delay" mode delays the next restart
# till the next interval. "fail" mode does not restart the task if
# "attempts" has been hit within the interval.
mode = "delay"
}
# A delay between a task failing and a restart occurring.
delay = "25s"
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
# Mode controls what happens when a task has restarted "attempts"
# times within the interval. "delay" mode delays the next restart
# till the next interval. "fail" mode does not restart the task if
# "attempts" has been hit within the interval.
mode = "delay"
}
# Size of the shared ephemeral disk between tasks in the task group.
size = 300
}
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
# Define a task to run
task "redis" {
# Use Docker to run the task.
driver = "docker"
# Size of the shared ephemeral disk between tasks in the task group.
size = 300
}
# Configure Docker driver with the image
config {
image = "redis:latest"
port_map {
db = 6379
}
}
# Define a task to run
task "redis" {
# Use Docker to run the task.
driver = "docker"
service {
name = "${TASKGROUP}-redis"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# Configure Docker driver with the image
config {
image = "redis:3.2"
port_map {
db = 6379
}
}
# We must specify the resources required for
# this task to ensure it runs on a machine with
# enough capacity.
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 10
port "db" {
}
}
}
service {
# ${TASKGROUP} is filled in automatically by Nomad
name = "${TASKGROUP}-redis"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# The artifact block can be specified one or more times to download
# artifacts prior to the task being started. This is convenient for
# shipping configs or data needed by the task.
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# Specify configuration related to log rotation
# logs {
# max_files = 10
# max_file_size = 15
# }
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
# kill_timeout = "20s"
}
}
# We must specify the resources required for this task to ensure
# it runs on a machine with enough capacity.
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 10
port "db" {}
}
}
# The artifact block can be specified one or more times to download
# artifacts prior to the task being started. This is convenient for
# shipping configs or data needed by the task.
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# Specify configuration related to log rotation
# logs {
# max_files = 10
# max_file_size = 15
# }
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
# kill_timeout = "20s"
}
}
}
`)

View File

@ -63,3 +63,12 @@ func TestInitCommand_Run(t *testing.T) {
t.Fatalf("expect file exists error, got: %s", out)
}
}
// TestInitCommand_defaultJob ensures the embedded default job file is always
// written with spaces instead of tabs. Since the default job file is embedded
// in the go file, it's easy for tabs to slip in.
func TestInitCommand_defaultJob(t *testing.T) {
	// strings.Contains(s, substr) searches s for substr. The previous code
	// had the arguments reversed — strings.Contains("\t", defaultJob) —
	// which searched the one-character string "\t" for the entire job file,
	// so it could never match and the test silently passed even when
	// defaultJob contained tabs.
	if strings.Contains(defaultJob, "\t") {
		t.Error("default job contains tab character - please convert to spaces")
	}
}