open-nomad/jobspec/parse_test.go

package jobspec
import (
"path/filepath"
"strings"
"testing"
"time"
capi "github.com/hashicorp/consul/api"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/ci"
"github.com/stretchr/testify/require"
)
// consts copied from nomad/structs package to keep jobspec isolated from rest of nomad
const (
// vaultChangeModeRestart restarts the task when a new token is retrieved.
vaultChangeModeRestart = "restart"
// vaultChangeModeSignal signals the task when a new token is retrieved.
vaultChangeModeSignal = "signal"
// templateChangeModeRestart marks that the task should be restarted if the
// template is re-rendered
templateChangeModeRestart = "restart"
)
func TestParse(t *testing.T) {
ci.Parallel(t)
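// Each test case names an HCL fixture from test-fixtures/, the *api.Job the
// parser is expected to produce (nil when an error is expected), and whether
// parsing should fail.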
cases := []struct {
File string
Result *api.Job
Err bool
}{
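// basic.hcl exercises most of the jobspec surface area: job-level fields,
// constraints, affinities, spreads, update strategy, volumes, services and
// checks, devices, artifacts, templates, and vault blocks.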
{
"basic.hcl",
&api.Job{
ID: stringToPtr("binstore-storagelocker"),
Name: stringToPtr("binstore-storagelocker"),
Type: stringToPtr("batch"),
Priority: intToPtr(52),
AllAtOnce: boolToPtr(true),
Datacenters: []string{"us2", "eu1"},
Region: stringToPtr("fooregion"),
Namespace: stringToPtr("foonamespace"),
ConsulToken: stringToPtr("abc"),
VaultToken: stringToPtr("foo"),
Meta: map[string]string{
"foo": "bar",
},
Constraints: []*api.Constraint{
{
LTarget: "kernel.os",
RTarget: "windows",
Operand: "=",
},
{
LTarget: "${attr.vault.version}",
RTarget: ">= 0.6.1",
Operand: "semver",
},
},
Affinities: []*api.Affinity{
{
LTarget: "${meta.team}",
RTarget: "mobile",
Operand: "=",
Weight: int8ToPtr(50),
},
},
Spreads: []*api.Spread{
{
Attribute: "${meta.rack}",
Weight: int8ToPtr(100),
SpreadTarget: []*api.SpreadTarget{
{
Value: "r1",
Percent: 40,
},
{
Value: "r2",
Percent: 60,
},
},
},
},
Update: &api.UpdateStrategy{
Stagger: timeToPtr(60 * time.Second),
MaxParallel: intToPtr(2),
HealthCheck: stringToPtr("manual"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(10 * time.Minute),
ProgressDeadline: timeToPtr(10 * time.Minute),
AutoRevert: boolToPtr(true),
AutoPromote: boolToPtr(true),
Canary: intToPtr(1),
},
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("outside"),
Tasks: []*api.Task{
{
Name: "outside",
Driver: "java",
Config: map[string]interface{}{
"jar_path": "s3://my-cool-store/foo.jar",
},
Meta: map[string]string{
"my-cool-key": "foobar",
},
},
},
},
{
Name: stringToPtr("binsl"),
Count: intToPtr(5),
Constraints: []*api.Constraint{
{
LTarget: "kernel.os",
RTarget: "linux",
Operand: "=",
},
},
Volumes: map[string]*api.VolumeRequest{
"foo": {
Name: "foo",
Type: "host",
Source: "/path",
ExtraKeysHCL: nil,
},
"bar": {
Name: "bar",
Type: "csi",
Source: "bar-vol",
ReadOnly: true,
AccessMode: "single-mode-writer",
AttachmentMode: "file-system",
MountOptions: &api.CSIMountOptions{
FSType: "ext4",
},
ExtraKeysHCL: nil,
},
"baz": {
Name: "baz",
Type: "csi",
Source: "bar-vol",
MountOptions: &api.CSIMountOptions{
MountFlags: []string{
"ro",
},
},
PerAlloc: true,
ExtraKeysHCL: nil,
},
},
Affinities: []*api.Affinity{
{
LTarget: "${node.datacenter}",
RTarget: "dc2",
Operand: "=",
Weight: int8ToPtr(100),
},
},
Meta: map[string]string{
"elb_mode": "tcp",
"elb_interval": "10",
"elb_checks": "3",
},
RestartPolicy: &api.RestartPolicy{
Interval: timeToPtr(10 * time.Minute),
Attempts: intToPtr(5),
Delay: timeToPtr(15 * time.Second),
Mode: stringToPtr("delay"),
},
Spreads: []*api.Spread{
{
Attribute: "${node.datacenter}",
Weight: int8ToPtr(50),
SpreadTarget: []*api.SpreadTarget{
{
Value: "dc1",
Percent: 50,
},
{
Value: "dc2",
Percent: 25,
},
{
Value: "dc3",
Percent: 25,
},
},
},
},
StopAfterClientDisconnect: timeToPtr(120 * time.Second),
MaxClientDisconnect: timeToPtr(120 * time.Hour),
ReschedulePolicy: &api.ReschedulePolicy{
Interval: timeToPtr(12 * time.Hour),
Attempts: intToPtr(5),
},
EphemeralDisk: &api.EphemeralDisk{
Sticky: boolToPtr(true),
SizeMB: intToPtr(150),
},
Update: &api.UpdateStrategy{
MaxParallel: intToPtr(3),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(1 * time.Second),
HealthyDeadline: timeToPtr(1 * time.Minute),
ProgressDeadline: timeToPtr(1 * time.Minute),
AutoRevert: boolToPtr(false),
AutoPromote: boolToPtr(false),
Canary: intToPtr(2),
},
Migrate: &api.MigrateStrategy{
MaxParallel: intToPtr(2),
HealthCheck: stringToPtr("task_states"),
MinHealthyTime: timeToPtr(11 * time.Second),
HealthyDeadline: timeToPtr(11 * time.Minute),
},
Tasks: []*api.Task{
{
Name: "binstore",
Driver: "docker",
User: "bob",
Kind: "connect-proxy:test",
Config: map[string]interface{}{
"image": "hashicorp/binstore",
"labels": []map[string]interface{}{
{
"FOO": "bar",
},
},
},
VolumeMounts: []*api.VolumeMount{
{
Volume: stringToPtr("foo"),
Destination: stringToPtr("/mnt/foo"),
},
},
Affinities: []*api.Affinity{
{
LTarget: "${meta.foo}",
RTarget: "a,b,c",
Operand: "set_contains",
Weight: int8ToPtr(25),
},
},
RestartPolicy: &api.RestartPolicy{
Attempts: intToPtr(10),
},
Services: []*api.Service{
{
Tags: []string{"foo", "bar"},
CanaryTags: []string{"canary", "bam"},
Meta: map[string]string{
"abc": "123",
},
CanaryMeta: map[string]string{
"canary": "boom",
},
PortLabel: "http",
Checks: []api.ServiceCheck{
{
Name: "check-name",
Type: "tcp",
PortLabel: "admin",
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
GRPCService: "foo.Bar",
GRPCUseTLS: true,
CheckRestart: &api.CheckRestart{
Limit: 3,
Grace: timeToPtr(10 * time.Second),
IgnoreWarnings: true,
},
},
},
},
},
Env: map[string]string{
"HELLO": "world",
"LOREM": "ipsum",
},
Resources: &api.Resources{
CPU: intToPtr(500),
MemoryMB: intToPtr(128),
MemoryMaxMB: intToPtr(256),
Networks: []*api.NetworkResource{
{
MBits: intToPtr(100),
ReservedPorts: []api.Port{{Label: "one", Value: 1}, {Label: "two", Value: 2}, {Label: "three", Value: 3}},
DynamicPorts: []api.Port{{Label: "http", Value: 0}, {Label: "https", Value: 0}, {Label: "admin", Value: 0}},
},
},
Devices: []*api.RequestedDevice{
{
Name: "nvidia/gpu",
Count: uint64ToPtr(10),
Constraints: []*api.Constraint{
{
LTarget: "${device.attr.memory}",
RTarget: "2GB",
Operand: ">",
},
},
Affinities: []*api.Affinity{
{
LTarget: "${device.model}",
RTarget: "1080ti",
Operand: "=",
Weight: int8ToPtr(50),
},
},
},
{
Name: "intel/gpu",
Count: nil,
},
},
},
KillTimeout: timeToPtr(22 * time.Second),
ShutdownDelay: 11 * time.Second,
LogConfig: &api.LogConfig{
MaxFiles: intToPtr(14),
MaxFileSizeMB: intToPtr(101),
},
Artifacts: []*api.TaskArtifact{
{
GetterSource: stringToPtr("http://foo.com/artifact"),
GetterOptions: map[string]string{
"checksum": "md5:b8a4f3f72ecab0510a6a31e997461c5f",
},
},
{
GetterSource: stringToPtr("http://bar.com/artifact"),
RelativeDest: stringToPtr("test/foo/"),
GetterOptions: map[string]string{
"checksum": "md5:ff1cc0d3432dad54d607c1505fb7245c",
},
GetterMode: stringToPtr("file"),
},
},
Vault: &api.Vault{
Namespace: stringToPtr("ns1"),
Policies: []string{"foo", "bar"},
Env: boolToPtr(true),
ChangeMode: stringToPtr(vaultChangeModeRestart),
},
Templates: []*api.Template{
{
SourcePath: stringToPtr("foo"),
DestPath: stringToPtr("foo"),
ChangeMode: stringToPtr("foo"),
ChangeSignal: stringToPtr("foo"),
Splay: timeToPtr(10 * time.Second),
Perms: stringToPtr("0644"),
Envvars: boolToPtr(true),
VaultGrace: timeToPtr(33 * time.Second),
},
{
SourcePath: stringToPtr("bar"),
DestPath: stringToPtr("bar"),
ChangeMode: stringToPtr(templateChangeModeRestart),
Splay: timeToPtr(5 * time.Second),
Perms: stringToPtr("777"),
LeftDelim: stringToPtr("--"),
RightDelim: stringToPtr("__"),
},
},
Leader: true,
KillSignal: "",
},
{
Name: "storagelocker",
Driver: "docker",
User: "",
Lifecycle: &api.TaskLifecycle{
Hook: "prestart",
Sidecar: true,
},
Config: map[string]interface{}{
"image": "hashicorp/storagelocker",
},
Resources: &api.Resources{
CPU: intToPtr(500),
MemoryMB: intToPtr(128),
},
Constraints: []*api.Constraint{
{
LTarget: "kernel.arch",
RTarget: "amd64",
Operand: "=",
},
},
Vault: &api.Vault{
Policies: []string{"foo", "bar"},
Env: boolToPtr(false),
ChangeMode: stringToPtr(vaultChangeModeSignal),
ChangeSignal: stringToPtr("SIGUSR1"),
},
},
},
},
},
},
false,
},
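// The next three fixtures are malformed and must fail to parse.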
{
"multi-network.hcl",
nil,
true,
},
{
"multi-resource.hcl",
nil,
true,
},
{
"multi-vault.hcl",
nil,
true,
},
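// default-job.hcl parses to a job with only ID and Name set.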
{
"default-job.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
},
false,
},
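// The following cases each cover a single constraint operand: version,
// regexp, set_contains, distinct_hosts, and distinct_property.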
{
"version-constraint.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Constraints: []*api.Constraint{
{
LTarget: "$attr.kernel.version",
RTarget: "~> 3.2",
Operand: api.ConstraintVersion,
},
},
},
false,
},
{
"regexp-constraint.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Constraints: []*api.Constraint{
{
LTarget: "$attr.kernel.version",
RTarget: "[0-9.]+",
Operand: api.ConstraintRegex,
},
},
},
false,
},
{
"set-contains-constraint.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Constraints: []*api.Constraint{
{
LTarget: "$meta.data",
RTarget: "foo,bar,baz",
Operand: api.ConstraintSetContains,
},
},
},
false,
},
{
"distinctHosts-constraint.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Constraints: []*api.Constraint{
{
Operand: api.ConstraintDistinctHosts,
},
},
},
false,
},
{
"distinctProperty-constraint.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Constraints: []*api.Constraint{
{
Operand: api.ConstraintDistinctProperty,
LTarget: "${meta.rack}",
},
},
},
false,
},
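// periodic-cron.hcl covers the periodic block, including prohibit_overlap
// and time_zone.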
{
"periodic-cron.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Periodic: &api.PeriodicConfig{
SpecType: stringToPtr(api.PeriodicSpecCron),
Spec: stringToPtr("*/5 * * *"),
ProhibitOverlap: boolToPtr(true),
TimeZone: stringToPtr("Europe/Minsk"),
},
},
false,
},
{
"specify-job.hcl",
&api.Job{
ID: stringToPtr("job1"),
Name: stringToPtr("My Job"),
},
false,
},
{
"task-nested-config.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
Tasks: []*api.Task{
{
Name: "bar",
Driver: "docker",
Config: map[string]interface{}{
"image": "hashicorp/image",
"port_map": []map[string]interface{}{
{
"db": 1234,
},
},
},
},
},
},
},
},
false,
},
{
"bad-artifact.hcl",
nil,
true,
},
{
"artifacts.hcl",
&api.Job{
ID: stringToPtr("binstore-storagelocker"),
Name: stringToPtr("binstore-storagelocker"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("binsl"),
Tasks: []*api.Task{
{
Name: "binstore",
Driver: "docker",
Artifacts: []*api.TaskArtifact{
{
GetterSource: stringToPtr("http://foo.com/bar"),
GetterOptions: map[string]string{"foo": "bar"},
RelativeDest: stringToPtr(""),
},
{
GetterSource: stringToPtr("http://foo.com/baz"),
GetterOptions: nil,
RelativeDest: nil,
},
{
GetterSource: stringToPtr("http://foo.com/bam"),
GetterOptions: nil,
RelativeDest: stringToPtr("var/foo"),
},
},
},
},
},
},
},
false,
},
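// csi-plugin.hcl maps a csi_plugin task block onto TaskCSIPluginConfig.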
{
"csi-plugin.hcl",
&api.Job{
ID: stringToPtr("binstore-storagelocker"),
Name: stringToPtr("binstore-storagelocker"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("binsl"),
Tasks: []*api.Task{
{
Name: "binstore",
Driver: "docker",
CSIPluginConfig: &api.TaskCSIPluginConfig{
ID: "org.hashicorp.csi",
Type: api.CSIPluginTypeMonolith,
MountDir: "/csi/test",
},
},
},
},
},
},
false,
},
{
"service-check-initial-status.hcl",
&api.Job{
ID: stringToPtr("check_initial_status"),
Name: stringToPtr("check_initial_status"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Count: intToPtr(1),
Tasks: []*api.Task{
{
Name: "task",
Services: []*api.Service{
{
Tags: []string{"foo", "bar"},
PortLabel: "http",
Checks: []api.ServiceCheck{
{
Name: "check-name",
Type: "http",
Path: "/",
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: capi.HealthPassing,
Method: "POST",
Header: map[string][]string{
"Authorization": {"Basic ZWxhc3RpYzpjaGFuZ2VtZQ=="},
},
},
},
},
},
},
},
},
},
},
false,
},
{
"service-check-pass-fail.hcl",
&api.Job{
ID: stringToPtr("check_pass_fail"),
Name: stringToPtr("check_pass_fail"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Count: intToPtr(1),
Tasks: []*api.Task{{
Name: "task",
Services: []*api.Service{{
Name: "service",
PortLabel: "http",
Checks: []api.ServiceCheck{{
Name: "check-name",
Type: "http",
Path: "/",
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: capi.HealthPassing,
Method: "POST",
SuccessBeforePassing: 3,
FailuresBeforeCritical: 4,
}},
}},
}},
}},
},
false,
},
{
"service-check-bad-header.hcl",
nil,
true,
},
{
"service-check-bad-header-2.hcl",
nil,
true,
},
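// vault_inheritance.hcl verifies that vault blocks defined at the job, group,
// and task level are inherited by the tasks.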
{
// TODO This should be pushed into the API
"vault_inheritance.hcl",
&api.Job{
ID: stringToPtr("example"),
Name: stringToPtr("example"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("cache"),
Tasks: []*api.Task{
{
Name: "redis",
Vault: &api.Vault{
Policies: []string{"group"},
Env: boolToPtr(true),
ChangeMode: stringToPtr(vaultChangeModeRestart),
},
},
{
Name: "redis2",
Vault: &api.Vault{
Policies: []string{"task"},
Env: boolToPtr(false),
ChangeMode: stringToPtr(vaultChangeModeRestart),
},
},
},
},
{
Name: stringToPtr("cache2"),
Tasks: []*api.Task{
{
Name: "redis",
Vault: &api.Vault{
Policies: []string{"job"},
Env: boolToPtr(true),
ChangeMode: stringToPtr(vaultChangeModeRestart),
},
},
},
},
},
},
false,
},
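// parameterized_job.hcl covers the parameterized block and the task-level
// dispatch_payload block.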
{
"parameterized_job.hcl",
&api.Job{
ID: stringToPtr("parameterized_job"),
Name: stringToPtr("parameterized_job"),
ParameterizedJob: &api.ParameterizedJobConfig{
Payload: "required",
MetaRequired: []string{"foo", "bar"},
MetaOptional: []string{"baz", "bam"},
},
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("foo"),
Tasks: []*api.Task{
{
Name: "bar",
Driver: "docker",
DispatchPayload: &api.DispatchPayloadConfig{
File: "foo/bar",
},
},
},
},
},
},
false,
},
{
"job-with-kill-signal.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
Tasks: []*api.Task{
{
Name: "bar",
Driver: "docker",
KillSignal: "SIGQUIT",
Config: map[string]interface{}{
"image": "hashicorp/image",
},
},
},
},
},
},
false,
},
{
"service-check-driver-address.hcl",
&api.Job{
ID: stringToPtr("address_mode_driver"),
Name: stringToPtr("address_mode_driver"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Tasks: []*api.Task{
{
Name: "task",
Services: []*api.Service{
{
Name: "http-service",
PortLabel: "http",
AddressMode: "auto",
Checks: []api.ServiceCheck{
{
Name: "http-check",
Type: "http",
Path: "/",
PortLabel: "http",
AddressMode: "driver",
},
},
},
{
Name: "random-service",
PortLabel: "9000",
AddressMode: "driver",
Checks: []api.ServiceCheck{
{
Name: "random-check",
Type: "tcp",
PortLabel: "9001",
AddressMode: "driver",
},
},
},
},
},
},
},
},
},
false,
},
{
"service-check-restart.hcl",
&api.Job{
ID: stringToPtr("service_check_restart"),
Name: stringToPtr("service_check_restart"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Tasks: []*api.Task{
{
Name: "task",
Services: []*api.Service{
{
Name: "http-service",
CheckRestart: &api.CheckRestart{
Limit: 3,
Grace: timeToPtr(10 * time.Second),
IgnoreWarnings: true,
},
Checks: []api.ServiceCheck{
{
Name: "random-check",
Type: "tcp",
PortLabel: "9001",
},
},
},
},
},
},
},
},
},
false,
},
{
"service-meta.hcl",
&api.Job{
ID: stringToPtr("service_meta"),
Name: stringToPtr("service_meta"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Tasks: []*api.Task{
{
Name: "task",
Services: []*api.Service{
{
Name: "http-service",
Meta: map[string]string{
"foo": "bar",
},
},
},
},
},
},
},
},
false,
},
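// service-enable-tag-override.hcl covers enable_tag_override on a service.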
{
"service-enable-tag-override.hcl",
&api.Job{
ID: stringToPtr("service_eto"),
Name: stringToPtr("service_eto"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Tasks: []*api.Task{{
Name: "task",
Services: []*api.Service{{
Name: "example",
EnableTagOverride: true,
}},
}},
}},
},
false,
},
{
"reschedule-job.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Type: stringToPtr("batch"),
Datacenters: []string{"dc1"},
Reschedule: &api.ReschedulePolicy{
Attempts: intToPtr(15),
Interval: timeToPtr(30 * time.Minute),
DelayFunction: stringToPtr("constant"),
Delay: timeToPtr(10 * time.Second),
},
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
Count: intToPtr(3),
Tasks: []*api.Task{
{
Name: "bar",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": "bash",
"args": []interface{}{"-c", "echo hi"},
},
},
},
},
},
},
false,
},
{
"reschedule-job-unlimited.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Type: stringToPtr("batch"),
Datacenters: []string{"dc1"},
Reschedule: &api.ReschedulePolicy{
DelayFunction: stringToPtr("exponential"),
Delay: timeToPtr(10 * time.Second),
MaxDelay: timeToPtr(120 * time.Second),
Unlimited: boolToPtr(true),
},
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
Count: intToPtr(3),
Tasks: []*api.Task{
{
Name: "bar",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": "bash",
"args": []interface{}{"-c", "echo hi"},
},
},
},
},
},
},
false,
},
{
"migrate-job.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Type: stringToPtr("batch"),
Datacenters: []string{"dc1"},
Migrate: &api.MigrateStrategy{
MaxParallel: intToPtr(2),
HealthCheck: stringToPtr("task_states"),
MinHealthyTime: timeToPtr(11 * time.Second),
HealthyDeadline: timeToPtr(11 * time.Minute),
},
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
Count: intToPtr(3),
Migrate: &api.MigrateStrategy{
MaxParallel: intToPtr(3),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(1 * time.Second),
HealthyDeadline: timeToPtr(1 * time.Minute),
},
Tasks: []*api.Task{
{
Name: "bar",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": "bash",
"args": []interface{}{"-c", "echo hi"},
},
},
},
},
},
},
false,
},
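// tg-network.hcl covers group-level bridge networking (ports, host_network,
// DNS) along with a Connect sidecar service, its upstreams, and a
// sidecar_task override.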
{
"tg-network.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
Datacenters: []string{"dc1"},
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
ShutdownDelay: timeToPtr(14 * time.Second),
Count: intToPtr(3),
Networks: []*api.NetworkResource{
{
Mode: "bridge",
ReservedPorts: []api.Port{
{
Label: "http",
Value: 80,
To: 8080,
HostNetwork: "public",
},
},
DNS: &api.DNSConfig{
Servers: []string{"8.8.8.8"},
Options: []string{"ndots:2", "edns0"},
},
},
},
Services: []*api.Service{
{
Name: "connect-service",
Tags: []string{"foo", "bar"},
CanaryTags: []string{"canary", "bam"},
PortLabel: "1234",
Connect: &api.ConsulConnect{
SidecarService: &api.ConsulSidecarService{
Tags: []string{"side1", "side2"},
Proxy: &api.ConsulProxy{
LocalServicePort: 8080,
Upstreams: []*api.ConsulUpstream{
{
DestinationName: "other-service",
LocalBindPort: 4567,
LocalBindAddress: "0.0.0.0",
Datacenter: "dc1",
MeshGateway: &api.ConsulMeshGateway{
Mode: "local",
},
},
},
},
},
SidecarTask: &api.SidecarTask{
Resources: &api.Resources{
CPU: intToPtr(500),
MemoryMB: intToPtr(1024),
},
Env: map[string]string{
"FOO": "abc",
},
ShutdownDelay: timeToPtr(5 * time.Second),
},
},
},
},
Tasks: []*api.Task{
{
Name: "bar",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": "bash",
"args": []interface{}{"-c", "echo hi"},
},
Resources: &api.Resources{
Networks: []*api.NetworkResource{
{
MBits: intToPtr(10),
},
},
},
},
},
},
},
},
false,
},
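// tg-service-check.hcl covers group-level service checks, including
// on_update and an HTTP check body.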
{
"tg-service-check.hcl",
&api.Job{
ID: stringToPtr("group_service_check_script"),
Name: stringToPtr("group_service_check_script"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Count: intToPtr(1),
Networks: []*api.NetworkResource{
{
Mode: "bridge",
ReservedPorts: []api.Port{
{
Label: "http",
Value: 80,
To: 8080,
},
},
},
},
Services: []*api.Service{
{
Name: "foo-service",
PortLabel: "http",
OnUpdate: "ignore",
Checks: []api.ServiceCheck{
{
Name: "check-name",
Type: "script",
Command: "/bin/true",
Interval: time.Duration(10 * time.Second),
Timeout: time.Duration(2 * time.Second),
InitialStatus: "passing",
TaskName: "foo",
OnUpdate: "ignore",
Body: "post body",
},
},
},
},
Tasks: []*api.Task{{Name: "foo"}},
},
},
},
false,
},
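// tg-service-proxy-expose.hcl covers expose path blocks on the Connect
// sidecar proxy.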
{
"tg-service-proxy-expose.hcl",
&api.Job{
ID: stringToPtr("group_service_proxy_expose"),
Name: stringToPtr("group_service_proxy_expose"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
SidecarService: &api.ConsulSidecarService{
Proxy: &api.ConsulProxy{
ExposeConfig: &api.ConsulExposeConfig{
Path: []*api.ConsulExposePath{{
Path: "/health",
Protocol: "http",
LocalPathPort: 2222,
ListenerPort: "healthcheck",
}, {
Path: "/metrics",
Protocol: "grpc",
LocalPathPort: 3000,
ListenerPort: "metrics",
}},
},
},
},
},
}},
}},
},
false,
},
{
"tg-service-connect-sidecar_task-name.hcl",
&api.Job{
ID: stringToPtr("sidecar_task_name"),
Name: stringToPtr("sidecar_task_name"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
Native: false,
SidecarService: &api.ConsulSidecarService{},
SidecarTask: &api.SidecarTask{
Name: "my-sidecar",
},
},
}},
}},
},
false,
},
{
"tg-service-connect-sidecar_disablecheck.hcl",
&api.Job{
ID: stringToPtr("sidecar_disablecheck"),
Name: stringToPtr("sidecar_disablecheck"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
Native: false,
SidecarService: &api.ConsulSidecarService{
DisableDefaultTCPCheck: true,
},
},
}},
}},
},
false,
},
{
"tg-service-connect-resources.hcl",
&api.Job{
ID: stringToPtr("sidecar_task_resources"),
Name: stringToPtr("sidecar_task_resources"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
SidecarTask: &api.SidecarTask{
Resources: &api.Resources{
CPU: intToPtr(111),
MemoryMB: intToPtr(222),
MemoryMaxMB: intToPtr(333),
},
},
},
}},
}},
},
false,
},
{
"tg-service-connect-proxy.hcl",
&api.Job{
ID: stringToPtr("service-connect-proxy"),
Name: stringToPtr("service-connect-proxy"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
Native: false,
SidecarService: &api.ConsulSidecarService{
Proxy: &api.ConsulProxy{
LocalServiceAddress: "10.0.1.2",
LocalServicePort: 8080,
ExposeConfig: &api.ConsulExposeConfig{
Path: []*api.ConsulExposePath{{
Path: "/metrics",
Protocol: "http",
LocalPathPort: 9001,
ListenerPort: "metrics",
}, {
Path: "/health",
Protocol: "http",
LocalPathPort: 9002,
ListenerPort: "health",
}},
},
Upstreams: []*api.ConsulUpstream{{
DestinationName: "upstream1",
LocalBindPort: 2001,
}, {
DestinationName: "upstream2",
LocalBindPort: 2002,
}},
Config: map[string]interface{}{
"foo": "bar",
},
},
},
},
}},
}},
},
false,
},
{
"tg-service-connect-local-service.hcl",
&api.Job{
ID: stringToPtr("connect-proxy-local-service"),
Name: stringToPtr("connect-proxy-local-service"),
Type: stringToPtr("service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
Native: false,
SidecarService: &api.ConsulSidecarService{
Proxy: &api.ConsulProxy{
LocalServiceAddress: "10.0.1.2",
LocalServicePort: 9876,
},
},
},
}},
}},
},
false,
},
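// tg-service-check-expose.hcl covers the expose flag on group service checks.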
{
"tg-service-check-expose.hcl",
&api.Job{
ID: stringToPtr("group_service_proxy_expose"),
Name: stringToPtr("group_service_proxy_expose"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
Connect: &api.ConsulConnect{
SidecarService: &api.ConsulSidecarService{
Proxy: &api.ConsulProxy{},
},
},
Checks: []api.ServiceCheck{{
Name: "example-check1",
Expose: true,
}, {
Name: "example-check2",
Expose: false,
}},
}},
}},
},
false,
},
{
"tg-service-connect-native.hcl",
&api.Job{
ID: stringToPtr("connect_native_service"),
Name: stringToPtr("connect_native_service"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
TaskName: "task1",
Connect: &api.ConsulConnect{
Native: true,
},
}},
}},
},
false,
},
{
"tg-service-enable-tag-override.hcl",
&api.Job{
ID: stringToPtr("group_service_eto"),
Name: stringToPtr("group_service_eto"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "example",
EnableTagOverride: true,
}},
}},
},
false,
},
{
"tg-scaling-policy.hcl",
&api.Job{
ID: stringToPtr("elastic"),
Name: stringToPtr("elastic"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Scaling: &api.ScalingPolicy{
Type: "horizontal",
Min: int64ToPtr(5),
Max: int64ToPtr(100),
Policy: map[string]interface{}{
"foo": "bar",
"b": true,
"val": 5,
"f": .1,
"check": []map[string]interface{}{
{"foo": []map[string]interface{}{
{"query": "some_query"},
}},
},
},
Enabled: boolToPtr(false),
},
},
},
},
false,
},
{
"task-scaling-policy.hcl",
&api.Job{
ID: stringToPtr("foo"),
Name: stringToPtr("foo"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("bar"),
Tasks: []*api.Task{
{
Name: "bar",
Driver: "docker",
ScalingPolicies: []*api.ScalingPolicy{
{
Type: "vertical_cpu",
Target: nil,
Min: int64ToPtr(50),
Max: int64ToPtr(1000),
Policy: map[string]interface{}{
"test": "cpu",
},
Enabled: boolToPtr(true),
},
{
Type: "vertical_mem",
Target: nil,
Min: int64ToPtr(128),
Max: int64ToPtr(1024),
Policy: map[string]interface{}{
"test": "mem",
},
Enabled: boolToPtr(false),
},
},
},
},
},
},
},
false,
},
{
"tg-service-connect-gateway-ingress.hcl",
&api.Job{
ID: stringToPtr("connect_gateway_ingress"),
Name: stringToPtr("connect_gateway_ingress"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "ingress-gateway-service",
Connect: &api.ConsulConnect{
Gateway: &api.ConsulGateway{
Proxy: &api.ConsulGatewayProxy{
ConnectTimeout: timeToPtr(3 * time.Second),
EnvoyGatewayBindTaggedAddresses: true,
EnvoyGatewayBindAddresses: map[string]*api.ConsulGatewayBindAddress{
"listener1": {Name: "listener1", Address: "10.0.0.1", Port: 8888},
"listener2": {Name: "listener2", Address: "10.0.0.2", Port: 8889},
},
EnvoyGatewayNoDefaultBind: true,
Config: map[string]interface{}{"foo": "bar"},
},
Ingress: &api.ConsulIngressConfigEntry{
TLS: &api.ConsulGatewayTLSConfig{
Enabled: true,
},
Listeners: []*api.ConsulIngressListener{{
Port: 8001,
Protocol: "tcp",
Services: []*api.ConsulIngressService{{
Name: "service1",
Hosts: []string{
"127.0.0.1:8001",
"[::1]:8001",
}}, {
Name: "service2",
Hosts: []string{
"10.0.0.1:8001",
}},
}}, {
Port: 8080,
Protocol: "http",
Services: []*api.ConsulIngressService{{
Name: "nginx",
Hosts: []string{
"2.2.2.2:8080",
},
}},
},
},
},
},
},
}},
}},
},
false,
},
{
"tg-service-connect-gateway-terminating.hcl",
&api.Job{
ID: stringToPtr("connect_gateway_terminating"),
Name: stringToPtr("connect_gateway_terminating"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "terminating-gateway-service",
Connect: &api.ConsulConnect{
Gateway: &api.ConsulGateway{
Proxy: &api.ConsulGatewayProxy{
ConnectTimeout: timeToPtr(3 * time.Second),
EnvoyGatewayBindTaggedAddresses: true,
EnvoyGatewayBindAddresses: map[string]*api.ConsulGatewayBindAddress{
"listener1": {Name: "listener1", Address: "10.0.0.1", Port: 8888},
"listener2": {Name: "listener2", Address: "10.0.0.2", Port: 8889},
},
EnvoyGatewayNoDefaultBind: true,
EnvoyDNSDiscoveryType: "LOGICAL_DNS",
Config: map[string]interface{}{"foo": "bar"},
},
Terminating: &api.ConsulTerminatingConfigEntry{
Services: []*api.ConsulLinkedService{{
Name: "service1",
CAFile: "ca.pem",
CertFile: "cert.pem",
KeyFile: "key.pem",
}, {
Name: "service2",
SNI: "myhost",
}},
},
},
},
}},
}},
},
false,
},
{
"tg-service-connect-gateway-mesh.hcl",
&api.Job{
ID: stringToPtr("connect_gateway_mesh"),
Name: stringToPtr("connect_gateway_mesh"),
TaskGroups: []*api.TaskGroup{{
Name: stringToPtr("group"),
Services: []*api.Service{{
Name: "mesh-gateway-service",
Connect: &api.ConsulConnect{
Gateway: &api.ConsulGateway{
Proxy: &api.ConsulGatewayProxy{
Config: map[string]interface{}{"foo": "bar"},
},
Mesh: &api.ConsulMeshConfigEntry{},
},
},
}},
}},
},
false,
},
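// The next case parses tg-scaling-policy-minimal.hcl. A minimal sketch of what
// that fixture is assumed to contain: only the required "max" attribute is set,
// and the parser is expected to default the type to "horizontal" while leaving
// min, policy, and enabled unset.
//
//	group "group" {
//	  scaling {
//	    max = 10
//	  }
//	}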
{
"tg-scaling-policy-minimal.hcl",
&api.Job{
ID: stringToPtr("elastic"),
Name: stringToPtr("elastic"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Scaling: &api.ScalingPolicy{
Type: "horizontal",
Min: nil,
Max: int64ToPtr(10),
Policy: nil,
Enabled: nil,
},
},
},
},
false,
},
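// The fixtures below are expected to fail parsing (Err is true); each one
// exercises an invalid group- or task-level scaling policy variant.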
{
"tg-scaling-policy-missing-max.hcl",
nil,
true,
},
{
"tg-scaling-policy-multi-policy.hcl",
nil,
true,
},
{
"tg-scaling-policy-with-label.hcl",
nil,
true,
},
{
"tg-scaling-policy-invalid-type.hcl",
nil,
true,
},
{
"task-scaling-policy-missing-name.hcl",
nil,
true,
},
{
"task-scaling-policy-multi-name.hcl",
nil,
true,
},
{
"task-scaling-policy-multi-cpu.hcl",
nil,
true,
},
{
"task-scaling-policy-invalid-type.hcl",
nil,
true,
},
{
"task-scaling-policy-invalid-resource.hcl",
nil,
true,
},
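// The next case parses consul-namespace.hcl. A rough sketch of the fixture,
// assuming the group-level consul block is its only content:
//
//	group "group" {
//	  consul {
//	    namespace = "foo"
//	  }
//	}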
{
"consul-namespace.hcl",
&api.Job{
ID: stringToPtr("consul-namespace"),
Name: stringToPtr("consul-namespace"),
TaskGroups: []*api.TaskGroup{
{
Name: stringToPtr("group"),
Consul: &api.Consul{
Namespace: "foo",
},
},
},
},
false,
},
{
"multiregion.hcl",
&api.Job{
ID: stringToPtr("multiregion_job"),
Name: stringToPtr("multiregion_job"),
Multiregion: &api.Multiregion{
Strategy: &api.MultiregionStrategy{
MaxParallel: intToPtr(1),
OnFailure: stringToPtr("fail_all"),
},
Regions: []*api.MultiregionRegion{
{
Name: "west",
Count: intToPtr(2),
Datacenters: []string{"west-1"},
Meta: map[string]string{"region_code": "W"},
},
{
Name: "east",
Count: intToPtr(1),
Datacenters: []string{"east-1", "east-2"},
Meta: map[string]string{"region_code": "E"},
},
},
},
},
false,
},
{
"resources-cores.hcl",
&api.Job{
ID: stringToPtr("cores-test"),
Name: stringToPtr("cores-test"),
TaskGroups: []*api.TaskGroup{
{
Count: intToPtr(5),
Name: stringToPtr("group"),
Tasks: []*api.Task{
{
Name: "task",
Driver: "docker",
Resources: &api.Resources{
Cores: intToPtr(4),
MemoryMB: intToPtr(128),
},
},
},
},
},
},
false,
},
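// The next case parses service-provider.hcl. A rough sketch of the fixture,
// assuming a single docker task that registers a Nomad-provided service:
//
//	group "group" {
//	  count = 5
//	  task "task" {
//	    driver = "docker"
//	    service {
//	      name     = "service-provider"
//	      provider = "nomad"
//	    }
//	  }
//	}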
{
"service-provider.hcl",
&api.Job{
ID: stringToPtr("service-provider"),
Name: stringToPtr("service-provider"),
TaskGroups: []*api.TaskGroup{
{
Count: intToPtr(5),
Name: stringToPtr("group"),
Tasks: []*api.Task{
{
Name: "task",
Driver: "docker",
Services: []*api.Service{
{
Name: "service-provider",
Provider: "nomad",
},
},
},
},
},
},
},
false,
},
}
for _, tc := range cases {
t.Run(tc.File, func(t *testing.T) {
t.Logf("Testing parse: %s", tc.File)
path, err := filepath.Abs(filepath.Join("./test-fixtures", tc.File))
require.NoError(t, err)
actual, err := ParseFile(path)
if tc.Err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tc.Result, actual)
}
})
}
}
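
// TestBadPorts verifies that parsing bad-ports.hcl fails and that the error
// message includes errPortLabel.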
func TestBadPorts(t *testing.T) {
ci.Parallel(t)
path, err := filepath.Abs(filepath.Join("./test-fixtures", "bad-ports.hcl"))
if err != nil {
t.Fatalf("Can't get absolute path for file: %s", err)
}
_, err = ParseFile(path)
if err == nil {
t.Fatalf("Expected an error")
}
if !strings.Contains(err.Error(), errPortLabel.Error()) {
t.Fatalf("\nExpected error\n %s\ngot\n %v", errPortLabel, err)
}
}
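
// TestOverlappingPorts verifies that parsing overlapping-ports.hcl fails with
// a port label collision error.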
func TestOverlappingPorts(t *testing.T) {
ci.Parallel(t)
path, err := filepath.Abs(filepath.Join("./test-fixtures", "overlapping-ports.hcl"))
if err != nil {
t.Fatalf("Can't get absolute path for file: %s", err)
}
_, err = ParseFile(path)
if err == nil {
t.Fatalf("Expected an error")
}
if !strings.Contains(err.Error(), "found a port label collision") {
t.Fatalf("Expected collision error; got %v", err)
}
}
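
// TestIncorrectKey verifies that a misspelled key in basic_wrong_key.hcl is
// reported with the full path to the offending check stanza.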
func TestIncorrectKey(t *testing.T) {
ci.Parallel(t)
path, err := filepath.Abs(filepath.Join("./test-fixtures", "basic_wrong_key.hcl"))
if err != nil {
t.Fatalf("Can't get absolute path for file: %s", err)
}
_, err = ParseFile(path)
if err == nil {
t.Fatalf("Expected an error")
}
if !strings.Contains(err.Error(), "* group: 'binsl', task: 'binstore', service (0): 'foo', check -> invalid key: nterval") {
t.Fatalf("Expected key error; got %v", err)
}
}