Merge branch 'master' into TC_fix_verify_ssl

commit d00355e8d3
@@ -1,9 +1,10 @@
 ## 0.5.3 (Unreleased)

 IMPROVEMENTS:
-  * api: Added APIs for requesting GC of allocations [GH-2192]
-  * core: Introduce Parameterized Jobs and Dispatch command/API [GH-2128]
+  * core: Introduce parameterized jobs and dispatch command/API [GH-2128]
   * core: Cancel blocked evals upon successful one for job [GH-2155]
+  * api: Added APIs for requesting GC of allocations [GH-2192]
   * api: Job summary endpoint includes summary status for child jobs [GH-2128]
   * api/client: Plain text log streaming suitable for viewing logs in a browser
     [GH-2235]
   * cli: Defaulting to showing allocations which belong to currently registered
api/tasks.go (34 changed lines)
@@ -141,28 +141,28 @@ type LogConfig struct {
 	MaxFileSizeMB int
 }

-// DispatchInputConfig configures how a task gets its input from a job dispatch
-type DispatchInputConfig struct {
+// DispatchPayloadConfig configures how a task gets its input from a job dispatch
+type DispatchPayloadConfig struct {
 	File string
 }

 // Task is a single process in a task group.
 type Task struct {
-	Name          string
-	Driver        string
-	User          string
-	Config        map[string]interface{}
-	Constraints   []*Constraint
-	Env           map[string]string
-	Services      []Service
-	Resources     *Resources
-	Meta          map[string]string
-	KillTimeout   time.Duration
-	LogConfig     *LogConfig
-	Artifacts     []*TaskArtifact
-	Vault         *Vault
-	Templates     []*Template
-	DispatchInput *DispatchInputConfig
+	Name            string
+	Driver          string
+	User            string
+	Config          map[string]interface{}
+	Constraints     []*Constraint
+	Env             map[string]string
+	Services        []Service
+	Resources       *Resources
+	Meta            map[string]string
+	KillTimeout     time.Duration
+	LogConfig       *LogConfig
+	Artifacts       []*TaskArtifact
+	Vault           *Vault
+	Templates       []*Template
+	DispatchPayload *DispatchPayloadConfig
 }

 // TaskArtifact is used to download artifacts before running a task.
client/driver/executor/checks_linux_test.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+package executor
+
+import (
+	"log"
+	"os"
+	"strings"
+	"testing"
+
+	dstructs "github.com/hashicorp/nomad/client/driver/structs"
+	"github.com/hashicorp/nomad/client/testutil"
+)
+
+func TestExecScriptCheckWithIsolation(t *testing.T) {
+	testutil.ExecCompatible(t)
+
+	execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
+	ctx, allocDir := testExecutorContextWithChroot(t)
+	defer allocDir.Destroy()
+
+	execCmd.FSIsolation = true
+	execCmd.ResourceLimits = true
+	execCmd.User = dstructs.DefaultUnpriviledgedUser
+
+	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+
+	if err := executor.SetContext(ctx); err != nil {
+		t.Fatalf("Unexpected error")
+	}
+
+	_, err := executor.LaunchCmd(&execCmd)
+	if err != nil {
+		t.Fatalf("error in launching command: %v", err)
+	}
+
+	check := &ExecScriptCheck{
+		id:          "foo",
+		cmd:         "/bin/echo",
+		args:        []string{"hello", "world"},
+		taskDir:     ctx.TaskDir,
+		FSIsolation: true,
+	}
+
+	res := check.Run()
+	expectedOutput := "hello world"
+	expectedExitCode := 0
+	if res.Err != nil {
+		t.Fatalf("err: %v", res.Err)
+	}
+	if strings.TrimSpace(res.Output) != expectedOutput {
+		t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output)
+	}
+
+	if res.ExitCode != expectedExitCode {
+		t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode)
+	}
+}
@@ -9,7 +9,6 @@ import (
 	docker "github.com/fsouza/go-dockerclient"

-	dstructs "github.com/hashicorp/nomad/client/driver/structs"
 	"github.com/hashicorp/nomad/client/testutil"
 )
@@ -37,51 +36,6 @@ func TestExecScriptCheckNoIsolation(t *testing.T) {
 	}
 }

-func TestExecScriptCheckWithIsolation(t *testing.T) {
-	testutil.ExecCompatible(t)
-
-	execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
-	ctx, allocDir := testExecutorContextWithChroot(t)
-	defer allocDir.Destroy()
-
-	execCmd.FSIsolation = true
-	execCmd.ResourceLimits = true
-	execCmd.User = dstructs.DefaultUnpriviledgedUser
-
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
-
-	if err := executor.SetContext(ctx); err != nil {
-		t.Fatalf("Unexpected error")
-	}
-
-	_, err := executor.LaunchCmd(&execCmd)
-	if err != nil {
-		t.Fatalf("error in launching command: %v", err)
-	}
-
-	check := &ExecScriptCheck{
-		id:          "foo",
-		cmd:         "/bin/echo",
-		args:        []string{"hello", "world"},
-		taskDir:     ctx.TaskDir,
-		FSIsolation: true,
-	}
-
-	res := check.Run()
-	expectedOutput := "hello world"
-	expectedExitCode := 0
-	if res.Err != nil {
-		t.Fatalf("err: %v", res.Err)
-	}
-	if strings.TrimSpace(res.Output) != expectedOutput {
-		t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output)
-	}
-
-	if res.ExitCode != expectedExitCode {
-		t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode)
-	}
-}
-
 func TestDockerScriptCheck(t *testing.T) {
 	if !testutil.DockerIsConnected(t) {
 		return
@@ -8,6 +8,7 @@ import (
 	"strconv"
 	"strings"
 	"testing"
 	"time"

 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/driver/env"
@@ -136,3 +137,54 @@ ld.so.conf.d/`
 		t.Fatalf("Command output incorrectly: want %v; got %v", expected, act)
 	}
 }
+
+func TestExecutor_ClientCleanup(t *testing.T) {
+	testutil.ExecCompatible(t)
+
+	ctx, allocDir := testExecutorContextWithChroot(t)
+	ctx.Task.LogConfig.MaxFiles = 1
+	ctx.Task.LogConfig.MaxFileSizeMB = 300
+	defer allocDir.Destroy()
+
+	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+
+	if err := executor.SetContext(ctx); err != nil {
+		t.Fatalf("Unexpected error")
+	}
+
+	// Need to run a command which will produce continuous output but not
+	// too quickly to ensure executor.Exit() stops the process.
+	execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}}
+	execCmd.FSIsolation = true
+	execCmd.ResourceLimits = true
+	execCmd.User = "nobody"
+
+	ps, err := executor.LaunchCmd(&execCmd)
+	if err != nil {
+		t.Fatalf("error in launching command: %v", err)
+	}
+	if ps.Pid == 0 {
+		t.Fatalf("expected process to start and have non zero pid")
+	}
+	time.Sleep(500 * time.Millisecond)
+	if err := executor.Exit(); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	file := filepath.Join(ctx.LogDir, "web.stdout.0")
+	finfo, err := os.Stat(file)
+	if err != nil {
+		t.Fatalf("error stating stdout file: %v", err)
+	}
+	if finfo.Size() == 0 {
+		t.Fatal("Nothing in stdout; expected at least one byte.")
+	}
+	time.Sleep(2 * time.Second)
+	finfo1, err := os.Stat(file)
+	if err != nil {
+		t.Fatalf("error stating stdout file: %v", err)
+	}
+	if finfo.Size() != finfo1.Size() {
+		t.Fatalf("Expected size: %v, actual: %v", finfo.Size(), finfo1.Size())
+	}
+}
@@ -14,7 +14,6 @@ import (
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/driver/env"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/client/testutil"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	tu "github.com/hashicorp/nomad/testutil"
@@ -187,57 +186,6 @@ func TestExecutor_WaitExitSignal(t *testing.T) {
 	}
 }

-func TestExecutor_ClientCleanup(t *testing.T) {
-	testutil.ExecCompatible(t)
-
-	ctx, allocDir := testExecutorContextWithChroot(t)
-	ctx.Task.LogConfig.MaxFiles = 1
-	ctx.Task.LogConfig.MaxFileSizeMB = 300
-	defer allocDir.Destroy()
-
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
-
-	if err := executor.SetContext(ctx); err != nil {
-		t.Fatalf("Unexpected error")
-	}
-
-	// Need to run a command which will produce continuous output but not
-	// too quickly to ensure executor.Exit() stops the process.
-	execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}}
-	execCmd.FSIsolation = true
-	execCmd.ResourceLimits = true
-	execCmd.User = "nobody"
-
-	ps, err := executor.LaunchCmd(&execCmd)
-	if err != nil {
-		t.Fatalf("error in launching command: %v", err)
-	}
-	if ps.Pid == 0 {
-		t.Fatalf("expected process to start and have non zero pid")
-	}
-	time.Sleep(500 * time.Millisecond)
-	if err := executor.Exit(); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	file := filepath.Join(ctx.LogDir, "web.stdout.0")
-	finfo, err := os.Stat(file)
-	if err != nil {
-		t.Fatalf("error stating stdout file: %v", err)
-	}
-	if finfo.Size() == 0 {
-		t.Fatal("Nothing in stdout; expected at least one byte.")
-	}
-	time.Sleep(2 * time.Second)
-	finfo1, err := os.Stat(file)
-	if err != nil {
-		t.Fatalf("error stating stdout file: %v", err)
-	}
-	if finfo.Size() != finfo1.Size() {
-		t.Fatalf("Expected size: %v, actual: %v", finfo.Size(), finfo1.Size())
-	}
-}
-
 func TestExecutor_Start_Kill(t *testing.T) {
 	execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10 && hello world"}}
 	ctx, allocDir := testExecutorContext(t)
@@ -753,9 +753,9 @@ func (r *TaskRunner) prestart(resultCh chan bool) {

 	// If the job is a dispatch job and there is a payload write it to disk
 	requirePayload := len(r.alloc.Job.Payload) != 0 &&
-		(r.task.DispatchInput != nil && r.task.DispatchInput.File != "")
+		(r.task.DispatchPayload != nil && r.task.DispatchPayload.File != "")
 	if !r.payloadRendered && requirePayload {
-		renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchInput.File)
+		renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchPayload.File)
 		decoded, err := snappy.Decode(nil, r.alloc.Job.Payload)
 		if err != nil {
 			r.setState(
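A note on the `snappy.Decode` call above: the dispatched payload is carried snappy-compressed in `alloc.Job.Payload` and is only expanded when the task runner writes it into the task's local directory. A minimal round-trip sketch using the same compression library (`github.com/golang/snappy`); the payload bytes here are illustrative, not taken from this commit:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// Illustrative dispatch payload (hypothetical contents).
	payload := []byte(`{"quality": "1080p"}`)

	// Roughly what ends up stored on the job: the compressed bytes.
	compressed := snappy.Encode(nil, payload)

	// What prestart does before rendering the file into local/.
	decoded, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // {"quality": "1080p"}
}
```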
@@ -536,7 +536,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
 	task.Driver = "mock_driver"
 	task.Config = map[string]interface{}{
 		"exit_code": "0",
-		"run_for":   "10s",
+		"run_for":   "100s",
 	}

 	ctx := testTaskRunnerFromAlloc(t, true, alloc)
@@ -1232,7 +1232,7 @@ func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
 		"run_for": "1s",
 	}
 	fileName := "test"
-	task.DispatchInput = &structs.DispatchInputConfig{
+	task.DispatchPayload = &structs.DispatchPayloadConfig{
 		File: fileName,
 	}
 	alloc.Job.ParameterizedJob = &structs.ParameterizedJobConfig{}
@@ -32,6 +32,13 @@ General Options:

 Dispatch Options:

+  -meta <key>=<value>
+    Meta takes a key/value pair separated by "=". The metadata key will be
+    merged into the job's metadata. The job may define a default value for the
+    key which is overridden when dispatching. The flag can be provided more than
+    once to inject multiple metadata key/value pairs. Arbitrary keys are not
+    allowed. The parameterized job must allow the key to be merged.
+
   -detach
     Return immediately instead of entering monitor mode. After job dispatch,
     the evaluation ID will be printed to the screen, which can be used to
@@ -44,7 +51,7 @@ Dispatch Options:
 }

 func (c *JobDispatchCommand) Synopsis() string {
-	return "Dispatch an instance of a parametereized job"
+	return "Dispatch an instance of a parameterized job"
 }

 func (c *JobDispatchCommand) Run(args []string) int {
@@ -564,7 +564,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
 		"artifact",
 		"config",
 		"constraint",
-		"dispatch_input",
+		"dispatch_payload",
 		"driver",
 		"env",
 		"kill_timeout",
@@ -587,7 +587,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
 	delete(m, "artifact")
 	delete(m, "config")
 	delete(m, "constraint")
-	delete(m, "dispatch_input")
+	delete(m, "dispatch_payload")
 	delete(m, "env")
 	delete(m, "logs")
 	delete(m, "meta")
@@ -747,10 +747,10 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
 		t.Vault = v
 	}

-	// If we have a dispatch_input block parse that
-	if o := listVal.Filter("dispatch_input"); len(o.Items) > 0 {
+	// If we have a dispatch_payload block parse that
+	if o := listVal.Filter("dispatch_payload"); len(o.Items) > 0 {
 		if len(o.Items) > 1 {
-			return fmt.Errorf("only one dispatch_input block is allowed in a task. Number of dispatch_input blocks found: %d", len(o.Items))
+			return fmt.Errorf("only one dispatch_payload block is allowed in a task. Number of dispatch_payload blocks found: %d", len(o.Items))
 		}
 		var m map[string]interface{}
 		dispatchBlock := o.Items[0]
@@ -760,15 +760,15 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
 			"file",
 		}
 		if err := checkHCLKeys(dispatchBlock.Val, valid); err != nil {
-			return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_input ->", n))
+			return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_payload ->", n))
 		}

 		if err := hcl.DecodeObject(&m, dispatchBlock.Val); err != nil {
 			return err
 		}

-		t.DispatchInput = &structs.DispatchInputConfig{}
-		if err := mapstructure.WeakDecode(m, t.DispatchInput); err != nil {
+		t.DispatchPayload = &structs.DispatchPayloadConfig{}
+		if err := mapstructure.WeakDecode(m, t.DispatchPayload); err != nil {
 			return err
 		}
 	}
@@ -1259,12 +1259,11 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob
 		return err
 	}

-	delete(m, "meta")
-
 	// Check for invalid keys
 	valid := []string{
 		"payload",
-		"meta_keys",
+		"meta_required",
+		"meta_optional",
 	}
 	if err := checkHCLKeys(o.Val, valid); err != nil {
 		return err
@@ -1276,37 +1275,6 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob
 		return err
 	}

-	var listVal *ast.ObjectList
-	if ot, ok := o.Val.(*ast.ObjectType); ok {
-		listVal = ot.List
-	} else {
-		return fmt.Errorf("parameterized block should be an object")
-	}
-
-	// Parse the meta block
-	if metaList := listVal.Filter("meta_keys"); len(metaList.Items) > 0 {
-		// Get our resource object
-		o := metaList.Items[0]
-
-		var m map[string]interface{}
-		if err := hcl.DecodeObject(&m, o.Val); err != nil {
-			return err
-		}
-
-		// Check for invalid keys
-		valid := []string{
-			"optional",
-			"required",
-		}
-		if err := checkHCLKeys(o.Val, valid); err != nil {
-			return err
-		}
-
-		if err := mapstructure.WeakDecode(m, &d); err != nil {
-			return err
-		}
-	}
-
 	*result = &d
 	return nil
 }
@@ -571,7 +571,7 @@ func TestParse(t *testing.T) {
 					MaxFiles:      10,
 					MaxFileSizeMB: 10,
 				},
-				DispatchInput: &structs.DispatchInputConfig{
+				DispatchPayload: &structs.DispatchPayloadConfig{
 					File: "foo/bar",
 				},
 			},
@@ -1,17 +1,15 @@
 job "parameterized_job" {
 	parameterized {
 		payload = "required"
-		meta_keys {
-			required = ["foo", "bar"]
-			optional = ["baz", "bam"]
-		}
+		meta_required = ["foo", "bar"]
+		meta_optional = ["baz", "bam"]
 	}
 	group "foo" {
 		task "bar" {
 			driver = "docker"
 			resources {}

-			dispatch_input {
+			dispatch_payload {
 				file = "foo/bar"
 			}
 		}
@@ -375,8 +375,8 @@ func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) {
 		diff.Objects = append(diff.Objects, lDiff)
 	}

-	// Dispatch Input diff
-	dDiff := primitiveObjectDiff(t.DispatchInput, other.DispatchInput, nil, "DispatchInput", contextual)
+	// Dispatch payload diff
+	dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual)
 	if dDiff != nil {
 		diff.Objects = append(diff.Objects, dDiff)
 	}
@@ -667,11 +667,11 @@ func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *Ob
 	diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)

 	// Meta diffs
-	if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "OptionalMeta", contextual); optionalDiff != nil {
+	if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil {
 		diff.Objects = append(diff.Objects, optionalDiff)
 	}

-	if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "RequiredMeta", contextual); requiredDiff != nil {
+	if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil {
 		diff.Objects = append(diff.Objects, requiredDiff)
 	}
@@ -904,11 +904,11 @@ func TestJobDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeAdded,
-					Name: "OptionalMeta",
+					Name: "MetaOptional",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeAdded,
-							Name: "OptionalMeta",
+							Name: "MetaOptional",
 							Old:  "",
 							New:  "foo",
 						},

@@ -916,11 +916,11 @@ func TestJobDiff(t *testing.T) {
 				},
 				{
 					Type: DiffTypeAdded,
-					Name: "RequiredMeta",
+					Name: "MetaRequired",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeAdded,
-							Name: "RequiredMeta",
+							Name: "MetaRequired",
 							Old:  "",
 							New:  "bar",
 						},

@@ -958,11 +958,11 @@ func TestJobDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeDeleted,
-					Name: "OptionalMeta",
+					Name: "MetaOptional",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeDeleted,
-							Name: "OptionalMeta",
+							Name: "MetaOptional",
 							Old:  "foo",
 							New:  "",
 						},

@@ -970,11 +970,11 @@ func TestJobDiff(t *testing.T) {
 				},
 				{
 					Type: DiffTypeDeleted,
-					Name: "RequiredMeta",
+					Name: "MetaRequired",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeDeleted,
-							Name: "RequiredMeta",
+							Name: "MetaRequired",
 							Old:  "bar",
 							New:  "",
 						},

@@ -1018,17 +1018,17 @@ func TestJobDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeEdited,
-					Name: "OptionalMeta",
+					Name: "MetaOptional",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeAdded,
-							Name: "OptionalMeta",
+							Name: "MetaOptional",
 							Old:  "",
 							New:  "bam",
 						},
 						{
 							Type: DiffTypeDeleted,
-							Name: "OptionalMeta",
+							Name: "MetaOptional",
 							Old:  "foo",
 							New:  "",
 						},

@@ -1036,17 +1036,17 @@ func TestJobDiff(t *testing.T) {
 				},
 				{
 					Type: DiffTypeEdited,
-					Name: "RequiredMeta",
+					Name: "MetaRequired",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeAdded,
-							Name: "RequiredMeta",
+							Name: "MetaRequired",
 							Old:  "",
 							New:  "bang",
 						},
 						{
 							Type: DiffTypeDeleted,
-							Name: "RequiredMeta",
+							Name: "MetaRequired",
 							Old:  "bar",
 							New:  "",
 						},

@@ -1091,11 +1091,11 @@ func TestJobDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeNone,
-					Name: "OptionalMeta",
+					Name: "MetaOptional",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeNone,
-							Name: "OptionalMeta",
+							Name: "MetaOptional",
 							Old:  "foo",
 							New:  "foo",
 						},

@@ -1103,11 +1103,11 @@ func TestJobDiff(t *testing.T) {
 				},
 				{
 					Type: DiffTypeNone,
-					Name: "RequiredMeta",
+					Name: "MetaRequired",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeNone,
-							Name: "RequiredMeta",
+							Name: "MetaRequired",
 							Old:  "bar",
 							New:  "bar",
 						},
@@ -3666,10 +3666,10 @@ func TestTaskDiff(t *testing.T) {
 			},
 		},
 		{
-			// DispatchInput added
+			// DispatchPayload added
 			Old: &Task{},
 			New: &Task{
-				DispatchInput: &DispatchInputConfig{
+				DispatchPayload: &DispatchPayloadConfig{
 					File: "foo",
 				},
 			},

@@ -3678,7 +3678,7 @@ func TestTaskDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeAdded,
-					Name: "DispatchInput",
+					Name: "DispatchPayload",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeAdded,

@@ -3692,9 +3692,9 @@ func TestTaskDiff(t *testing.T) {
 			},
 		},
 		{
-			// DispatchInput deleted
+			// DispatchPayload deleted
 			Old: &Task{
-				DispatchInput: &DispatchInputConfig{
+				DispatchPayload: &DispatchPayloadConfig{
 					File: "foo",
 				},
 			},

@@ -3704,7 +3704,7 @@ func TestTaskDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeDeleted,
-					Name: "DispatchInput",
+					Name: "DispatchPayload",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeDeleted,

@@ -3718,14 +3718,14 @@ func TestTaskDiff(t *testing.T) {
 			},
 		},
 		{
-			// Dispatch input edited
+			// Dispatch payload edited
 			Old: &Task{
-				DispatchInput: &DispatchInputConfig{
+				DispatchPayload: &DispatchPayloadConfig{
 					File: "foo",
 				},
 			},
 			New: &Task{
-				DispatchInput: &DispatchInputConfig{
+				DispatchPayload: &DispatchPayloadConfig{
 					File: "bar",
 				},
 			},

@@ -3734,7 +3734,7 @@ func TestTaskDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeEdited,
-					Name: "DispatchInput",
+					Name: "DispatchPayload",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeEdited,

@@ -3748,16 +3748,16 @@ func TestTaskDiff(t *testing.T) {
 			},
 		},
 		{
-			// DispatchInput edited with context. Place holder for if more
+			// DispatchPayload edited with context. Place holder for if more
 			// fields are added
 			Contextual: true,
 			Old: &Task{
-				DispatchInput: &DispatchInputConfig{
+				DispatchPayload: &DispatchPayloadConfig{
 					File: "foo",
 				},
 			},
 			New: &Task{
-				DispatchInput: &DispatchInputConfig{
+				DispatchPayload: &DispatchPayloadConfig{
 					File: "bar",
 				},
 			},

@@ -3766,7 +3766,7 @@ func TestTaskDiff(t *testing.T) {
 			Objects: []*ObjectDiff{
 				{
 					Type: DiffTypeEdited,
-					Name: "DispatchInput",
+					Name: "DispatchPayload",
 					Fields: []*FieldDiff{
 						{
 							Type: DiffTypeEdited,
@@ -1647,10 +1647,10 @@ type ParameterizedJobConfig struct {
 	Payload string

 	// MetaRequired is metadata keys that must be specified by the dispatcher
-	MetaRequired []string `mapstructure:"required"`
+	MetaRequired []string `mapstructure:"meta_required"`

 	// MetaOptional is metadata keys that may be specified by the dispatcher
-	MetaOptional []string `mapstructure:"optional"`
+	MetaOptional []string `mapstructure:"meta_optional"`
 }

 func (d *ParameterizedJobConfig) Validate() error {
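The retagging above is what lets the new flattened HCL keys (`meta_required`, `meta_optional`, seen in the parser and fixture hunks earlier) decode straight into the struct. A minimal sketch of that decoding path, not part of the commit, using `github.com/mitchellh/mapstructure` (which the parser invokes) and a trimmed copy of the struct:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Trimmed copy of ParameterizedJobConfig with the new tags from the hunk above.
type ParameterizedJobConfig struct {
	Payload      string
	MetaRequired []string `mapstructure:"meta_required"`
	MetaOptional []string `mapstructure:"meta_optional"`
}

func main() {
	// Shape of the map hcl.DecodeObject would produce for the new syntax:
	//   parameterized { payload = "required" meta_required = [...] ... }
	m := map[string]interface{}{
		"payload":       "required",
		"meta_required": []string{"foo", "bar"},
		"meta_optional": []string{"baz", "bam"},
	}

	var d ParameterizedJobConfig
	if err := mapstructure.WeakDecode(m, &d); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", d) // {Payload:required MetaRequired:[foo bar] MetaOptional:[baz bam]}
}
```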
@@ -1694,22 +1694,22 @@ func DispatchedID(templateID string, t time.Time) string {
 	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
 }

-// DispatchInputConfig configures how a task gets its input from a job dispatch
-type DispatchInputConfig struct {
+// DispatchPayloadConfig configures how a task gets its input from a job dispatch
+type DispatchPayloadConfig struct {
 	// File specifies a relative path to where the input data should be written
 	File string
 }

-func (d *DispatchInputConfig) Copy() *DispatchInputConfig {
+func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
 	if d == nil {
 		return nil
 	}
-	nd := new(DispatchInputConfig)
+	nd := new(DispatchPayloadConfig)
 	*nd = *d
 	return nd
 }

-func (d *DispatchInputConfig) Validate() error {
+func (d *DispatchPayloadConfig) Validate() error {
 	// Verify the destination doesn't escape
 	escaped, err := PathEscapesAllocDir("task/local/", d.File)
 	if err != nil {

@@ -2272,8 +2272,8 @@ type Task struct {
 	// Resources is the resources needed by this task
 	Resources *Resources

-	// DispatchInput configures how the task retrieves its input from a dispatch
-	DispatchInput *DispatchInputConfig
+	// DispatchPayload configures how the task retrieves its input from a dispatch
+	DispatchPayload *DispatchPayloadConfig

 	// Meta is used to associate arbitrary metadata with this
 	// task. This is opaque to Nomad.

@@ -2312,7 +2312,7 @@ func (t *Task) Copy() *Task {
 	nt.Vault = nt.Vault.Copy()
 	nt.Resources = nt.Resources.Copy()
 	nt.Meta = helper.CopyMapStringString(nt.Meta)
-	nt.DispatchInput = nt.DispatchInput.Copy()
+	nt.DispatchPayload = nt.DispatchPayload.Copy()

 	if t.Artifacts != nil {
 		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))

@@ -2477,10 +2477,10 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
 		}
 	}

-	// Validate the dispatch input block if there
-	if t.DispatchInput != nil {
-		if err := t.DispatchInput.Validate(); err != nil {
-			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Input validation failed: %v", err))
+	// Validate the dispatch payload block if there
+	if t.DispatchPayload != nil {
+		if err := t.DispatchPayload.Validate(); err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
 		}
 	}
@@ -1518,8 +1518,8 @@ func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
 	}
 }

-func TestDispatchInputConfig_Validate(t *testing.T) {
-	d := &DispatchInputConfig{
+func TestDispatchPayloadConfig_Validate(t *testing.T) {
+	d := &DispatchPayloadConfig{
 		File: "foo",
 	}
@@ -591,9 +591,6 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
 	unblock := make(chan struct{})
 	for i := 0; i < numRequests; i++ {
 		go func() {
-			// Ensure all the goroutines are made
-			time.Sleep(10 * time.Millisecond)
-
 			// Lookup ourselves
 			_, err := client.LookupToken(ctx, v.Config.Token)
 			if err != nil {

@@ -607,7 +604,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {

 			// Cancel the context
 			cancel()
-			time.AfterFunc(1*time.Second, func() { close(unblock) })
+			close(unblock)
 		}()
 	}

@@ -618,9 +615,15 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
 	}

 	desired := numRequests - 1
-	if cancels != desired {
-		t.Fatalf("Incorrect number of cancels; got %d; want %d", cancels, desired)
-	}
+	testutil.WaitForResult(func() (bool, error) {
+		if cancels != desired {
+			return false, fmt.Errorf("Incorrect number of cancels; got %d; want %d", cancels, desired)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("Connection not established")
+	})
 }

 func TestVaultClient_CreateToken_Root(t *testing.T) {
website/source/docs/commands/job-dispatch.html.md.erb (new file, 114 lines)
@@ -0,0 +1,114 @@
+---
+layout: "docs"
+page_title: "Commands: job dispatch"
+sidebar_current: "docs-commands-job-dispatch"
+description: >
+  The dispatch command is used to create an instance of a parameterized job.
+---
+
+# Command: job dispatch
+
+~> The `job dispatch` subcommand described here is available only in version
+0.5.3 and later. The release candidate is downloadable on the [releases
+page.](https://releases.hashicorp.com/nomad/0.5.3-rc1/)
+
+The `job dispatch` command is used to create new instances of a [parameterized
+job]. The parameterized job captures a job's configuration and runtime
+requirements in a generic way and `dispatch` is used to provide the input for
+the job to run against. A parameterized job is similar to a function definition,
+and dispatch is used to invoke the function.
+
+Each time a job is dispatched, a unique job ID is generated. This allows a
+caller to track the status of the job, much like a future or promise in some
+programming languages.
+
+## Usage
+
+```
+nomad job dispatch [options] <parameterized job> [input source]
+```
+
+Dispatch creates an instance of a parameterized job. A data payload to the
+dispatched instance can be provided via stdin by using "-" for the input source
+or by specifying a path to a file. Metadata can be supplied by using the meta
+flag one or more times.
+
+The payload has a **size limit of 16KiB**.
+
+Upon successful creation, the dispatched job ID will be printed and the
+triggered evaluation will be monitored. This can be disabled by supplying the
+detach flag.
+
+On successful job submission and scheduling, exit code 0 will be returned. If
+there are job placement issues encountered (unsatisfiable constraints, resource
+exhaustion, etc), then the exit code will be 2. Any other errors, including
+client connection issues or internal errors, are indicated by exit code 1.
+
+## General Options
+
+<%= partial "docs/commands/_general_options" %>
+
+## Run Options
+
+* `-meta`: Meta takes a key/value pair separated by "=". The metadata key will
+  be merged into the job's metadata. The job may define a default value for the
+  key which is overridden when dispatching. The flag can be provided more than
+  once to inject multiple metadata key/value pairs. Arbitrary keys are not
+  allowed. The parameterized job must allow the key to be merged.
+
+* `-detach`: Return immediately instead of monitoring. A new evaluation ID
+  will be output, which can be used to examine the evaluation using the
+  [eval-status](/docs/commands/eval-status.html) command
+
+* `-verbose`: Show full information.
+
+## Examples
+
+Dispatch against a parameterized job with the ID "video-encode", passing in a
+configuration payload via stdin:
+
+```
+$ cat << EOF | nomad job dispatch video-encode -
+{
+  "s3-input": "https://s3-us-west-1.amazonaws.com/video-bucket/cb31dabb1",
+  "s3-output": "https://s3-us-west-1.amazonaws.com/video-bucket/a149adbe3",
+  "input-codec": "mp4",
+  "output-codec": "webm",
+  "quality": "1080p"
+}
+EOF
+Dispatched Job ID = video-encode/dispatch-1485379325-cb38d00d
+Evaluation ID     = 31199841
+
+==> Monitoring evaluation "31199841"
+    Evaluation triggered by job "example/dispatch-1485379325-cb38d00d"
+    Allocation "8254b85f" created: node "82ff9c50", group "cache"
+    Evaluation status changed: "pending" -> "complete"
+==> Evaluation "31199841" finished with status "complete"
+```
+
+Dispatch against a parameterized job with the ID "video-encode", passing in a
+configuration payload via a file:
+
+```
+$ nomad job dispatch video-encode video-config.json
+Dispatched Job ID = video-encode/dispatch-1485379325-cb38d00d
+Evaluation ID     = 31199841
+
+==> Monitoring evaluation "31199841"
+    Evaluation triggered by job "example/dispatch-1485379325-cb38d00d"
+    Allocation "8254b85f" created: node "82ff9c50", group "cache"
+    Evaluation status changed: "pending" -> "complete"
+==> Evaluation "31199841" finished with status "complete"
+```
+
+Dispatch against a parameterized job with the ID "video-encode" using the detach
+flag:
+
+```
+$ nomad job dispatch -detach video-encode video-config.json
+Dispatched Job ID = example/dispatch-1485380684-c37b3dba
+Evaluation ID     = d9034c4e
+```
+
+[parameterized job]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification"
@@ -54,26 +54,28 @@ Short view of a specific job:

 ```
 $ nomad status -short job1
-ID          = job1
-Name        = Test Job
-Type        = service
-Priority    = 3
-Datacenters = dc1,dc2,dc3
-Status      = pending
-Periodic    = false
+ID            = job1
+Name          = Test Job
+Type          = service
+Priority      = 3
+Datacenters   = dc1,dc2,dc3
+Status        = pending
+Periodic      = false
+Parameterized = false
 ```

 Full status information of a job:

 ```
 $ nomad status example
-ID          = example
-Name        = example
-Type        = service
-Priority    = 50
-Datacenters = dc1
-Status      = running
-Periodic    = false
+ID            = example
+Name          = example
+Type          = service
+Priority      = 50
+Datacenters   = dc1
+Status        = running
+Periodic      = false
+Parameterized = false

 Summary
 Task Group  Queued  Starting  Running  Failed  Complete  Lost

@@ -84,17 +86,71 @@ ID        Eval ID   Node ID   Task Group  Desired  Status   Created At
 24cfd201  81efc2fa  8d0331e9  cache       run      running  08/08/16 21:03:19 CDT
 ```

+Full status information of a periodic job:
+
+```
+ID                    = example
+Name                  = example
+Type                  = batch
+Priority              = 50
+Datacenters           = dc1
+Status                = running
+Periodic              = true
+Parameterized         = false
+Next Periodic Launch  = 01/26/17 06:19:46 UTC (1s from now)
+
+Children Job Summary
+Pending  Running  Dead
+0        5        0
+
+Previously Launched Jobs
+ID                           Status
+example/periodic-1485411581  running
+example/periodic-1485411582  running
+example/periodic-1485411583  running
+example/periodic-1485411584  running
+example/periodic-1485411585  running
+```
+
+Full status information of a parameterized job:
+
+```
+ID            = example
+Name          = example
+Type          = batch
+Priority      = 50
+Datacenters   = dc1
+Status        = running
+Periodic      = false
+Parameterized = true
+
+Parameterized Job
+Payload           = required
+Required Metadata = foo
+Optional Metadata = bar
+
+Parameterized Job Summary
+Pending  Running  Dead
+0        2        0
+
+Dispatched Jobs
+ID                                    Status
+example/dispatch-1485411496-58f24d2d  running
+example/dispatch-1485411499-fa2ee40e  running
+```
+
 Full status information of a job with placement failures:

 ```
 $ nomad status example
-ID          = example
-Name        = example
-Type        = service
-Priority    = 50
-Datacenters = dc1
-Status      = running
-Periodic    = false
+ID            = example
+Name          = example
+Type          = service
+Priority      = 50
+Datacenters   = dc1
+Status        = running
+Periodic      = false
+Parameterized = false

 Summary
 Task Group  Queued  Starting  Running  Failed  Complete  Lost

@@ -120,13 +176,14 @@ become available so that it can place the remaining allocations.

 ```
 $ nomad status -evals example
-ID          = example
-Name        = example
-Type        = service
-Priority    = 50
-Datacenters = dc1
-Status      = running
-Periodic    = false
+ID            = example
+Name          = example
+Type          = service
+Priority      = 50
+Datacenters   = dc1
+Status        = running
+Periodic      = false
+Parameterized = false

 Summary
 Task Group  Queued  Starting  Running  Failed  Complete  Lost
@@ -137,7 +137,6 @@ region is used; another region can be specified using the `?region=` query param
     "ModifyIndex": 14
 }
 ```
-
 </dd>
 </dl>

@@ -275,6 +274,11 @@ region is used; another region can be specified using the `?region=` query param
    ```javascript
    {
      "JobID": "example",
+     "Children": {
+       "Dead": 0,
+       "Running": 7,
+       "Pending": 2
+     },
      "Summary": {
        "cache": {
          "Queued": 0,

@@ -333,6 +337,52 @@ region is used; another region can be specified using the `?region=` query param
   </dd>
 </dl>

+<dl>
+  <dt>Description</dt>
+  <dd>
+    Dispatch a new instance of a parameterized job.
+  </dd>
+
+  <dt>Method</dt>
+  <dd>PUT or POST</dd>
+
+  <dt>URL</dt>
+  <dd>`/v1/job/<ID>/dispatch`</dd>
+
+  <dt>Parameters</dt>
+  <dd>
+    <ul>
+      <li>
+        <span class="param">Payload</span>
+        <span class="param-flags">optional</span>
+        A `[]byte` array encoded as a base64 string with a maximum size of 16KiB.
+      </li>
+      <li>
+        <span class="param">Meta</span>
+        <span class="param-flags">optional</span>
+        A `map[string]string` of metadata keys to their values.
+      </li>
+    </ul>
+  </dd>
+
+  <dt>Returns</dt>
+  <dd>
+
+    ```javascript
+    {
+      "KnownLeader": false,
+      "LastContact": 0,
+      "Index": 13,
+      "JobCreateIndex": 12,
+      "EvalCreateIndex": 13,
+      "EvalID": "e5f55fac-bc69-119d-528a-1fc7ade5e02c",
+      "DispatchedJobID": "example/dispatch-1485408778-81644024"
+    }
+    ```
+
+  </dd>
+</dl>
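A minimal sketch of driving the dispatch endpoint above from Go, using only the fields documented there (`Payload` as base64, `Meta` as a string map, and the response's `DispatchedJobID`/`EvalID`). The agent address and job ID are assumptions for illustration:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request body fields come from the Parameters list above.
	body, _ := json.Marshal(map[string]interface{}{
		// Payload must be base64 and stay under the 16KiB limit.
		"Payload": base64.StdEncoding.EncodeToString([]byte(`{"quality": "1080p"}`)),
		"Meta":    map[string]string{"foo": "bar"},
	})

	// Assumes a local agent on the default address; "example" is a placeholder job ID.
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:4646/v1/job/example/dispatch", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Response fields come from the Returns example above.
	var out struct {
		DispatchedJobID string
		EvalID          string
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.DispatchedJobID, out.EvalID)
}
```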
 <dl>
   <dt>Description</dt>
   <dd>
@@ -137,7 +137,10 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
       },
       "RelativeDest":"local/"
     }
-  ]
+  ],
+  "DispatchPayload": {
+    "File": "config.json"
+  }
 }
 ],
 "RestartPolicy":{

@@ -165,7 +168,17 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
 "Meta":{
   "foo":"bar",
   "baz":"pipe"
-}
+},
+"ParameterizedJob": {
+  "Payload": "required",
+  "MetaRequired": [
+    "foo"
+  ],
+  "MetaOptional": [
+    "bar"
+  ]
+},
+"Payload": null
 }
 }
 ```

@@ -194,6 +207,25 @@ The `Job` object supports the following keys:

 * `Meta` - Annotates the job with opaque metadata.

+* `ParameterizedJob` - Specifies the job as a parameterized job such that it can
+  be dispatched against. The `ParameterizedJob` object supports the following
+  attributes:
+
+  * `MetaOptional` - Specifies the set of metadata keys that may be provided
+    when dispatching against the job as a string array.
+
+  * `MetaRequired` - Specifies the set of metadata keys that must be provided
+    when dispatching against the job as a string array.
+
+  * `Payload` - Specifies the requirement of providing a payload when
+    dispatching against the parameterized job. The options for this field are
+    "optional", "required" and "forbidden". The default value is "optional".
+
+* `Payload` - The payload may not be set when submitting a job but may appear in
+  a dispatched job. The `Payload` will be a base64 encoded string containing the
+  payload that the job was dispatched with. The `payload` has a **maximum size
+  of 16 KiB**.
+
 * `Priority` - Specifies the job priority which is used to prioritize
   scheduling and access to resources. Must be between 1 and 100 inclusively,
   and defaults to 50.

@@ -295,6 +327,12 @@ The `Task` object supports the following keys:
 * `Constraints` - This is a list of `Constraint` objects. See the constraint
   reference for more details.

+* `DispatchPayload` - Configures the task to have access to dispatch payloads.
+  The `DispatchPayload` object supports the following attributes:
+
+  * `File` - Specifies the file name to write the content of dispatch payload
+    to. The file is written relative to the task's local directory.
+
 * `Driver` - Specifies the task driver that should be used to run the
   task. See the [driver documentation](/docs/drivers/index.html) for what
   is available. Examples include `docker`, `qemu`, `java`, and `exec`.
@@ -51,15 +51,15 @@ before starting the task.
   default value is to place the binary in `local/`. The destination is treated
   as a directory and source files will be downloaded into that directory path.

-- `source` `(string: <required>)` - Specifies the URL of the artifact to download.
-  Only `http`, `https`, and `s3` URLs are supported. See [`go-getter`][go-getter]
-  for details.
-
 - `options` `(map<string|string>: nil)` - Specifies configuration parameters to
   fetch the artifact. The key-value pairs map directly to parameters appended to
   the supplied `source` URL. Please see the [`go-getter`
   documentation][go-getter] for a complete list of options and examples
+
+- `source` `(string: <required>)` - Specifies the URL of the artifact to download.
+  Only `http`, `https`, and `s3` URLs are supported. See [`go-getter`][go-getter]
+  for details.

 ## `artifact` Examples

 The following examples only show the `artifact` stanzas. Remember that the
@@ -0,0 +1,62 @@
+---
+layout: "docs"
+page_title: "dispatch_payload Stanza - Job Specification"
+sidebar_current: "docs-job-specification-dispatch-payload"
+description: |-
+  The "dispatch_payload" stanza allows a task to access dispatch payloads.
+---
+
+# `dispatch_payload` Stanza
+
+<table class="table table-bordered table-striped">
+  <tr>
+    <th width="120">Placement</th>
+    <td>
+      <code>job -> group -> task -> **dispatch_payload**</code>
+    </td>
+  </tr>
+</table>
+
+The `dispatch_payload` stanza is used in conjunction with a [`parameterized`][parameterized] job
+that expects a payload. When the job is dispatched with a payload, the payload
+will be made available to any task that has a `dispatch_payload` stanza. The
+payload will be written to the configured file before the task is started. This
+allows the task to use the payload as input or configuration.
+
+```hcl
+job "docs" {
+  group "example" {
+    task "server" {
+      dispatch_payload {
+        file = "config.json"
+      }
+    }
+  }
+}
+```
+
+## `dispatch_payload` Parameters
+
+- `file` `(string: "")` - Specifies the file name to write the content of
+  dispatch payload to. The file is written relative to the [task's local
+  directory][localdir].
+
+## `dispatch_payload` Examples
+
+The following examples only show the `dispatch_payload` stanzas. Remember that the
+`dispatch_payload` stanza is only valid in the placements listed above.
+
+### Write Payload to a File
+
+This example shows a `dispatch_payload` block in a parameterized job that writes
+the payload to a `config.json` file.
+
+```hcl
+dispatch_payload {
+  file = "config.json"
+}
+```
+
+[localdir]: /docs/runtime/environment.html#local_ "Task Local Directory"
+[parameterized]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification"
@@ -42,6 +42,10 @@ job "docs" {
     "my-key" = "my-value"
   }

+  parameterized {
+    # ...
+  }
+
   periodic {
     # ...
   }

@@ -74,13 +78,16 @@ job "docs" {
 - `datacenters` `(array<string>: <required>)` - A list of datacenters in the region which are eligible
   for task placement. This must be provided, and does not have a default.

-- `group` <code>([Group][group]: <required>)</code> - Specifies the start of a
+- `group` <code>([Group][group]: \<required\>)</code> - Specifies the start of a
   group of tasks. This can be provided multiple times to define additional
   groups. Group names must be unique within the job file.

 - `meta` <code>([Meta][]: nil)</code> - Specifies a key-value map that annotates
   with user-defined metadata.

+- `parameterized` <code>([Parameterized][parameterized]: nil)</code> - Specifies
+  the job as a parameterized job such that it can be dispatched against.
+
 - `periodic` <code>([Periodic][]: nil)</code> - Allows the job to be scheduled
   at fixed times, dates or intervals.

@@ -215,6 +222,7 @@ $ VAULT_TOKEN="..." nomad run example.nomad
 [constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification"
 [group]: /docs/job-specification/group.html "Nomad group Job Specification"
 [meta]: /docs/job-specification/meta.html "Nomad meta Job Specification"
+[parameterized]: /docs/job-specification/parameterized.html "Nomad parameterized Job Specification"
 [periodic]: /docs/job-specification/periodic.html "Nomad periodic Job Specification"
 [task]: /docs/job-specification/task.html "Nomad task Job Specification"
 [update]: /docs/job-specification/update.html "Nomad update Job Specification"
website/source/docs/job-specification/parameterized.html.md (new file, 161 lines)
@@ -0,0 +1,161 @@
+---
+layout: "docs"
+page_title: "parameterized Stanza - Job Specification"
+sidebar_current: "docs-job-specification-parameterized"
+description: |-
+  A parameterized job is used to encapsulate a set of work that can be carried
+  out on various inputs much like a function definition. When the
+  `parameterized` stanza is added to a job, the job acts as a function to the
+  cluster as a whole.
+---
+
+# `parameterized` Stanza
+
+<table class="table table-bordered table-striped">
+  <tr>
+    <th width="120">Placement</th>
+    <td>
+      <code>job -> **parameterized**</code>
+    </td>
+  </tr>
+</table>
+
+A parameterized job is used to encapsulate a set of work that can be carried out
+on various inputs much like a function definition. When the `parameterized`
+stanza is added to a job, the job acts as a function to the cluster as a whole.
+
+The `parameterized` stanza allows job operators to configure a job that carries
+out a particular action, define its resource requirements and configure how
+inputs and configuration are retrieved by the tasks within the job.
+
+To invoke a parameterized job, [`nomad job
+dispatch`][dispatch command] or the equivalent HTTP APIs are
+used. When dispatching against a parameterized job, an opaque payload and
+metadata may be injected into the job. These inputs to the parameterized job act
+like arguments to a function. The job consumes them to change its behavior,
+without exposing the implementation details to the caller.
+
+To that end, tasks within the job can add a
+[`dispatch_payload`][dispatch_payload] stanza that
+defines where on the filesystem this payload gets written to. An example payload
+would be a task's JSON configuration.
+
+Further, certain metadata may be marked as required when dispatching a job so it
+can be used to inject configuration directly into a task's arguments using
+[interpolation]. An example of this would be to require a run ID key that
+could be used to look up the work the job is supposed to do from a management
+service or database.
+
+Each time a job is dispatched, a unique job ID is generated. This allows a
+caller to track the status of the job, much like a future or promise in some
+programming languages.
+
+```hcl
+job "docs" {
+  parameterized {
+    payload       = "required"
+    meta_required = ["dispatcher_email"]
+    meta_optional = ["pager_email"]
+  }
+}
+```
+
+## `parameterized` Requirements
+
+- The job's [scheduler type][batch-type] must be `batch`.
+
+## `parameterized` Parameters
+
+- `meta_optional` `(array<string>: nil)` - Specifies the set of metadata keys that
+  may be provided when dispatching against the job.
+
+- `meta_required` `(array<string>: nil)` - Specifies the set of metadata keys that
+  must be provided when dispatching against the job.
+
+- `payload` `(string: "optional")` - Specifies the requirement of providing a
+  payload when dispatching against the parameterized job. The **maximum size of
+  a `payload` is 16 KiB**. The options for this field are:
+
+  - `"optional"` - A payload is optional when dispatching against the job.
+
+  - `"required"` - A payload must be provided when dispatching against the job.
+
+  - `"forbidden"` - A payload is forbidden when dispatching against the job.
+
+## `parameterized` Examples
+
+The following examples show non-runnable example parameterized jobs:
+
+### Required Inputs
+
+This example shows a parameterized job that requires both a payload and
+metadata:
+
+```hcl
+job "video-encode" {
+  ...
+  type = "batch"
+
+  parameterized {
+    payload       = "required"
+    meta_required = ["dispatcher_email"]
+  }
+
+  group "encode" {
+    ...
+
+    task "ffmpeg" {
+      driver = "exec"
+
+      config {
+        command = "ffmpeg-wrapper"
+
+        # When dispatched, the payload is written to a file that is then read by
+        # the created task upon startup
+        args = ["-config=${NOMAD_TASK_DIR}/config.json"]
+      }
+
+      dispatch_payload {
+        file = "config.json"
+      }
+    }
+  }
+}
+```
+
+### Metadata Interpolation
+
+```hcl
+job "email-blast" {
+  ...
+  type = "batch"
+
+  parameterized {
+    payload       = "forbidden"
+    meta_required = ["CAMPAIGN_ID"]
+  }
+
+  group "emails" {
+    ...
+
+    task "emailer" {
+      driver = "exec"
+
+      config {
+        command = "emailer"
+
+        # The campaign ID is interpolated and injected into the task's
+        # arguments
+        args = ["-campaign=${NOMAD_META_CAMPAIGN_ID}"]
+      }
+    }
+  }
+}
+```
+
+[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type"
+[dispatch command]: /docs/commands/job-dispatch.html "Nomad Job Dispatch Command"
+[resources]: /docs/job-specification/resources.html "Nomad resources Job Specification"
+[interpolation]: /docs/runtime/interpolation.html "Nomad Runtime Interpolation"
+[dispatch_payload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification"
@@ -35,6 +35,10 @@ job "docs" {
 The periodic expression is always evaluated in the **UTC timezone** to ensure
 consistent evaluation when Nomad spans multiple time zones.

+## `periodic` Requirements
+
+- The job's [scheduler type][batch-type] must be `batch`.
+
 ## `periodic` Parameters

 - `cron` `(string: <required>)` - Specifies a cron expression configuring the

@@ -60,4 +64,5 @@ periodic {
 }
 ```

+[batch-type]: /docs/job-specification/job.html#type "Batch scheduler type"
 [cron]: https://github.com/gorhill/cronexpr#implementation "List of cron expressions"
@@ -37,6 +37,9 @@ job "docs" {
   constraints on the task. This can be provided multiple times to define
   additional constraints.

+- `dispatch_payload` <code>([DispatchPayload][]: nil)</code> - Configures the
+  task to have access to dispatch payloads.
+
 - `driver` - Specifies the task driver that should be used to run the
   task. See the [driver documentation](/docs/drivers/index.html) for what
   is available. Examples include `docker`, `qemu`, `java`, and `exec`.

@@ -163,6 +166,7 @@ task "server" {
 [artifact]: /docs/job-specification/artifact.html "Nomad artifact Job Specification"
 [consul]: https://www.consul.io/ "Consul by HashiCorp"
 [constraint]: /docs/job-specification/constraint.html "Nomad constraint Job Specification"
+[dispatchpayload]: /docs/job-specification/dispatch_payload.html "Nomad dispatch_payload Job Specification"
 [env]: /docs/job-specification/env.html "Nomad env Job Specification"
 [meta]: /docs/job-specification/meta.html "Nomad meta Job Specification"
 [resources]: /docs/job-specification/resources.html "Nomad resources Job Specification"
@@ -47,19 +47,17 @@ README][ct].

 ## `template` Parameters

-- `source` `(string: "")` - Specifies the path to the template to be rendered.
-  One of `source` or `data` must be specified, but not both. This source can
-  optionally be fetched using an [`artifact`][artifact] resource. This template
-  must exist on the machine prior to starting the task; it is not possible to
-  reference a template inside of a Docker container, for example.
-
-- `destination` `(string: <required>)` - Specifies the location where the
-  resulting template should be rendered, relative to the task directory.
+- `change_signal` `(string: "")` - Specifies the signal to send to the task as a
+  string like `"SIGUSR1"` or `"SIGINT"`. This option is required if the
+  `change_mode` is `signal`.

 - `data` `(string: "")` - Specifies the raw template to execute. One of `source`
   or `data` must be specified, but not both. This is useful for smaller
   templates, but we recommend using `source` for larger templates.

+- `destination` `(string: <required>)` - Specifies the location where the
+  resulting template should be rendered, relative to the task directory.
+
 - `change_mode` `(string: "restart")` - Specifies the behavior Nomad should take
   if the rendered template changes. The possible values are:

@@ -67,9 +65,11 @@ README][ct].
   - `"restart"` - restart the task
   - `"signal"` - send a configurable signal to the task

-- `change_signal` `(string: "")` - Specifies the signal to send to the task as a
-  string like `"SIGUSR1"` or `"SIGINT"`. This option is required if the
-  `change_mode` is `signal`.
+- `source` `(string: "")` - Specifies the path to the template to be rendered.
+  One of `source` or `data` must be specified, but not both. This source can
+  optionally be fetched using an [`artifact`][artifact] resource. This template
+  must exist on the machine prior to starting the task; it is not possible to
+  reference a template inside of a Docker container, for example.

 - `splay` `(string: "5s")` - Specifies a random amount of time to wait between
   0ms and the given splay value before invoking the change mode. This is
@@ -48,6 +48,9 @@
             <li<%= sidebar_current("docs-job-specification-constraint")%>>
               <a href="/docs/job-specification/constraint.html">constraint</a>
             </li>
+            <li<%= sidebar_current("docs-job-specification-dispatch-payload")%>>
+              <a href="/docs/job-specification/dispatch_payload.html">dispatch_payload</a>
+            </li>
             <li<%= sidebar_current("docs-job-specification-env")%>>
               <a href="/docs/job-specification/env.html">env</a>
             </li>

@@ -69,6 +72,9 @@
             <li<%= sidebar_current("docs-job-specification-network")%>>
               <a href="/docs/job-specification/network.html">network</a>
             </li>
+            <li<%= sidebar_current("docs-job-specification-parameterized")%>>
+              <a href="/docs/job-specification/parameterized.html">parameterized</a>
+            </li>
             <li<%= sidebar_current("docs-job-specification-periodic")%>>
              <a href="/docs/job-specification/periodic.html">periodic</a>
             </li>

@@ -204,6 +210,9 @@
           <li<%= sidebar_current("docs-commands-inspect") %>>
             <a href="/docs/commands/inspect.html">inspect</a>
           </li>
+          <li<%= sidebar_current("docs-commands-job-dispatch") %>>
+            <a href="/docs/commands/job-dispatch.html">job dispatch</a>
+          </li>
           <li<%= sidebar_current("docs-commands-keygen") %>>
             <a href="/docs/commands/keygen.html">keygen</a>
           </li>