package command

import (
    "fmt"
    "io/ioutil"
    "os"
    "strings"
)

const (
    // DefaultInitName is the default name we use when
    // initializing the example file
    DefaultInitName = "example.nomad"
)

// InitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type InitCommand struct {
    Meta
}

func (c *InitCommand) Help() string {
    helpText := `
Usage: nomad init

  Creates an example job file that can be used as a starting
  point to customize further.
`
    return strings.TrimSpace(helpText)
}

func (c *InitCommand) Synopsis() string {
    return "Create an example job file"
}

func (c *InitCommand) Run(args []string) int {
    // Check for misuse
    if len(args) != 0 {
        c.Ui.Error(c.Help())
        return 1
    }

    // Check if the file already exists
    _, err := os.Stat(DefaultInitName)
    if err != nil && !os.IsNotExist(err) {
        c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
        return 1
    }
    if !os.IsNotExist(err) {
        c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
        return 1
    }

    // Write out the example
    err = ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660)
    if err != nil {
        c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
        return 1
    }

    // Success
    c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
    return 0
}
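
// InitCommand satisfies the cli.Command interface from
// github.com/mitchellh/cli (Help, Run, Synopsis), so it is normally exposed
// to the CLI through a CommandFactory. The registration sketched below is
// illustrative wiring only; the "init" key and the meta value are assumptions
// rather than something defined in this file:
//
//     "init": func() (cli.Command, error) {
//         return &InitCommand{Meta: meta}, nil
//     },
//
// With such a factory registered, `nomad init` dispatches to Run above.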

var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".
job "example" {
  # Run the job in the global region, which is the default.
  # region = "global"

  # Specify the datacenters within the region this job can run in.
  datacenters = ["dc1"]

  # Service type jobs optimize for long-lived services. This is
  # the default but we can change to batch for short-lived tasks.
  # type = "service"

  # Priority controls our access to resources and scheduling priority.
  # This can be 1 to 100, inclusive, and defaults to 50.
  # priority = 50

  # Restrict our job to only linux. We can specify multiple constraints
  # as needed.
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # Configure the job to do rolling updates
  update {
    # Stagger updates every 10 seconds
    stagger = "10s"

    # Update a single task at a time
    max_parallel = 1
  }

  # Create a 'cache' group. Each task in the group will be scheduled
  # onto the same machine.
  group "cache" {
    # Control the number of instances of this group. Defaults to 1.
    # count = 1

    # Configure the restart policy for the task group. If not provided, a
    # default is used based on the job type.
    restart {
      # The number of attempts to run the job within the specified interval.
      attempts = 10
      interval = "5m"

      # A delay between a task failing and a restart occurring.
      delay = "25s"

      # Mode controls what happens when a task has restarted "attempts"
      # times within the interval. "delay" mode delays the next restart
      # until the next interval. "fail" mode does not restart the task if
      # "attempts" has been hit within the interval.
      mode = "delay"
    }

    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true

      # Size of the shared ephemeral disk between tasks in the task group.
      size = 300
    }

    # Define a task to run
    task "redis" {
      # Use Docker to run the task.
      driver = "docker"

      # Configure Docker driver with the image
      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #   https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those log files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #   https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, disk space, network,
      # cpu, and more. This ensures the task will execute on a machine that
      # contains enough resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #   https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256MB
        network {
          mbits = 10
          port "db" {}
        }
      }

      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #   https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "global-redis-check"
        tags = ["global", "cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
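
        # For an HTTP service, the same health check could be expressed as an
        # HTTP check instead; this variant is only a sketch and the "/health"
        # path is an assumption (Redis itself does not serve HTTP):
        #
        # check {
        #   name     = "http-alive"
        #   type     = "http"
        #   path     = "/health"
        #   interval = "10s"
        #   timeout  = "2s"
        # }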
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      #   https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data          = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination   = "local/file.yml"
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # the token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      #   https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies      = ["cdn", "frontend"]
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # Controls the timeout between signalling a task that it will be killed
      # and killing it. If not set, a default is used.
      # kill_timeout = "20s"
    }
  }
}
`)