Backport of feature: Add new field render_templates on restart block into release/1.6.x (#18094)

This pull request was automerged via backport-assistant
hc-github-team-nomad-core 2023-07-28 13:54:00 -05:00 committed by GitHub
parent bebed09677
commit 2ed92e0c6c
21 changed files with 314 additions and 166 deletions

.changelog/18054.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
jobspec: Add new parameter `render_templates` for the `restart` block to allow explicit re-rendering of templates on task restart. The default value is `false` and is fully backward compatible.
```
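For orientation, a minimal sketch (not part of this diff) of how the new field surfaces through the Go API client. The package path `github.com/hashicorp/nomad/api`, the existing pointer fields, and the `"fail"` mode are taken from the hunks below; the pointer helpers are local to the example.

```go
// Sketch: opting a group's restart policy into template re-rendering on
// restart via the api package. All fields are pointers, so an unset
// render_templates falls back to the default (false).
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
)

func boolOf(b bool) *bool                  { return &b }
func intOf(i int) *int                     { return &i }
func strOf(s string) *string               { return &s }
func durOf(d time.Duration) *time.Duration { return &d }

func main() {
	rp := &api.RestartPolicy{
		Attempts:        intOf(2),
		Interval:        durOf(30 * time.Minute),
		Delay:           durOf(15 * time.Second),
		Mode:            strOf("fail"),
		RenderTemplates: boolOf(true), // new field added by this change
	}
	fmt.Println(*rp.RenderTemplates) // true
}
```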

View File

@ -320,10 +320,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -405,10 +406,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(3),
Interval: pointerOf(24 * time.Hour),
Mode: pointerOf("fail"),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(3),
Interval: pointerOf(24 * time.Hour),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(1),
@ -495,10 +497,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -663,10 +666,11 @@ func TestJobs_Canonicalize(t *testing.T) {
Name: pointerOf("cache"),
Count: pointerOf(1),
RestartPolicy: &RestartPolicy{
Interval: pointerOf(5 * time.Minute),
Attempts: pointerOf(10),
Delay: pointerOf(25 * time.Second),
Mode: pointerOf("delay"),
Interval: pointerOf(5 * time.Minute),
Attempts: pointerOf(10),
Delay: pointerOf(25 * time.Second),
Mode: pointerOf("delay"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -707,10 +711,11 @@ func TestJobs_Canonicalize(t *testing.T) {
}},
},
RestartPolicy: &RestartPolicy{
Interval: pointerOf(5 * time.Minute),
Attempts: pointerOf(20),
Delay: pointerOf(25 * time.Second),
Mode: pointerOf("delay"),
Interval: pointerOf(5 * time.Minute),
Attempts: pointerOf(20),
Delay: pointerOf(25 * time.Second),
Mode: pointerOf("delay"),
RenderTemplates: pointerOf(false),
},
Resources: &Resources{
CPU: pointerOf(500),
@ -835,7 +840,6 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
},
{
name: "update_merge",
input: &Job{
@ -928,10 +932,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -975,10 +980,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -1016,7 +1022,6 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
},
{
name: "restart_merge",
input: &Job{
@ -1036,8 +1041,9 @@ func TestJobs_Canonicalize(t *testing.T) {
{
Name: "task1",
RestartPolicy: &RestartPolicy{
Attempts: pointerOf(5),
Delay: pointerOf(1 * time.Second),
Attempts: pointerOf(5),
Delay: pointerOf(1 * time.Second),
RenderTemplates: pointerOf(true),
},
},
},
@ -1105,10 +1111,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -1140,10 +1147,11 @@ func TestJobs_Canonicalize(t *testing.T) {
Resources: DefaultResources(),
KillTimeout: pointerOf(5 * time.Second),
RestartPolicy: &RestartPolicy{
Attempts: pointerOf(5),
Delay: pointerOf(1 * time.Second),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Attempts: pointerOf(5),
Delay: pointerOf(1 * time.Second),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(true),
},
},
},
@ -1157,10 +1165,11 @@ func TestJobs_Canonicalize(t *testing.T) {
SizeMB: pointerOf(300),
},
RestartPolicy: &RestartPolicy{
Delay: pointerOf(20 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(20 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: pointerOf(0),
@ -1192,10 +1201,11 @@ func TestJobs_Canonicalize(t *testing.T) {
Resources: DefaultResources(),
KillTimeout: pointerOf(5 * time.Second),
RestartPolicy: &RestartPolicy{
Delay: pointerOf(20 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
Delay: pointerOf(20 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf("fail"),
RenderTemplates: pointerOf(false),
},
},
},

View File

@ -88,10 +88,11 @@ type AllocCheckStatuses map[string]AllocCheckStatus
// RestartPolicy defines how the Nomad client restarts
// tasks in a taskgroup when they fail
type RestartPolicy struct {
Interval *time.Duration `hcl:"interval,optional"`
Attempts *int `hcl:"attempts,optional"`
Delay *time.Duration `hcl:"delay,optional"`
Mode *string `hcl:"mode,optional"`
Interval *time.Duration `hcl:"interval,optional"`
Attempts *int `hcl:"attempts,optional"`
Delay *time.Duration `hcl:"delay,optional"`
Mode *string `hcl:"mode,optional"`
RenderTemplates *bool `mapstructure:"render_templates" hcl:"render_templates,optional"`
}
func (r *RestartPolicy) Merge(rp *RestartPolicy) {
@ -107,6 +108,9 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
if rp.Mode != nil {
r.Mode = rp.Mode
}
if rp.RenderTemplates != nil {
r.RenderTemplates = rp.RenderTemplates
}
}
// Reschedule configures how Tasks are rescheduled when they crash or fail.
@ -580,10 +584,11 @@ func (g *TaskGroup) Canonicalize(job *Job) {
// in nomad/structs/structs.go
func defaultServiceJobRestartPolicy() *RestartPolicy {
return &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf(RestartPolicyModeFail),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(2),
Interval: pointerOf(30 * time.Minute),
Mode: pointerOf(RestartPolicyModeFail),
RenderTemplates: pointerOf(false),
}
}
@ -591,10 +596,11 @@ func defaultServiceJobRestartPolicy() *RestartPolicy {
// in nomad/structs/structs.go
func defaultBatchJobRestartPolicy() *RestartPolicy {
return &RestartPolicy{
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(3),
Interval: pointerOf(24 * time.Hour),
Mode: pointerOf(RestartPolicyModeFail),
Delay: pointerOf(15 * time.Second),
Attempts: pointerOf(3),
Interval: pointerOf(24 * time.Hour),
Mode: pointerOf(RestartPolicyModeFail),
RenderTemplates: pointerOf(false),
}
}
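To make the pointer-merge semantics above concrete, here is a small self-contained sketch (simplified local types, not the real `api` package) showing that a task-level restart block only overrides `RenderTemplates` when the field is explicitly set, mirroring the `restart_merge` test case further up.

```go
// Sketch of the Merge behavior for the new field: nil means "not set in the
// task-level restart block", so the group-level value is preserved.
package main

import "fmt"

type restartPolicy struct {
	RenderTemplates *bool
}

func (r *restartPolicy) merge(rp *restartPolicy) {
	if rp.RenderTemplates != nil {
		r.RenderTemplates = rp.RenderTemplates
	}
}

func ptr(b bool) *bool { return &b }

func main() {
	group := &restartPolicy{RenderTemplates: ptr(false)} // group default
	group.merge(&restartPolicy{RenderTemplates: ptr(true)})
	fmt.Println(*group.RenderTemplates) // true: explicit task-level override wins

	group = &restartPolicy{RenderTemplates: ptr(false)}
	group.merge(&restartPolicy{}) // task restart block without render_templates
	fmt.Println(*group.RenderTemplates) // false: group value preserved
}
```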

View File

@ -111,14 +111,15 @@ func (tr *TaskRunner) initHooks() {
// If the task has templates, add the template hook
if len(task.Templates) != 0 {
tr.runnerHooks = append(tr.runnerHooks, newTemplateHook(&templateHookConfig{
logger: hookLogger,
lifecycle: tr,
events: tr,
templates: task.Templates,
clientConfig: tr.clientConfig,
envBuilder: tr.envBuilder,
consulNamespace: consulNamespace,
nomadNamespace: tr.alloc.Job.Namespace,
logger: hookLogger,
lifecycle: tr,
events: tr,
templates: task.Templates,
clientConfig: tr.clientConfig,
envBuilder: tr.envBuilder,
consulNamespace: consulNamespace,
nomadNamespace: tr.alloc.Job.Namespace,
renderOnTaskRestart: task.RestartPolicy.RenderTemplates,
}))
}

View File

@ -45,6 +45,9 @@ type templateHookConfig struct {
// nomadNamespace is the job's Nomad namespace
nomadNamespace string
// renderOnTaskRestart is a flag to explicitly re-render templates on task restart
renderOnTaskRestart bool
}
type templateHook struct {
@ -97,7 +100,12 @@ func (h *templateHook) Prestart(ctx context.Context, req *interfaces.TaskPrestar
// If we have already run prerun before, exit early.
if h.templateManager != nil {
return nil
if !h.config.renderOnTaskRestart {
return nil
}
h.logger.Info("re-rendering templates on task restart")
h.templateManager.Stop()
h.templateManager = nil
}
// Store the current Vault token and the task directory
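
A condensed sketch (simplified types, not the real hook) of the restart path introduced above: on the first Prestart the template manager is built; on a later Prestart caused by a task restart, the existing manager is either kept (the default, no re-render) or torn down and rebuilt so every template is rendered again.

```go
// Sketch of the template hook's restart behavior with render_templates.
package main

import "fmt"

type templateManager struct{}

func (m *templateManager) Stop() {}

type templateHook struct {
	renderOnTaskRestart bool
	templateManager     *templateManager
}

func (h *templateHook) prestart() {
	if h.templateManager != nil {
		if !h.renderOnTaskRestart {
			return // default: keep the running manager, templates stay as-is
		}
		fmt.Println("re-rendering templates on task restart")
		h.templateManager.Stop()
		h.templateManager = nil
	}
	// (re)build the manager, which renders all templates before the task starts
	h.templateManager = &templateManager{}
}

func main() {
	h := &templateHook{renderOnTaskRestart: true}
	h.prestart() // initial start: builds the manager
	h.prestart() // task restart: stops and rebuilds, forcing a re-render
}
```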

View File

@ -1058,10 +1058,11 @@ func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.Ta
tg.Consul = apiConsulToStructs(taskGroup.Consul)
tg.RestartPolicy = &structs.RestartPolicy{
Attempts: *taskGroup.RestartPolicy.Attempts,
Interval: *taskGroup.RestartPolicy.Interval,
Delay: *taskGroup.RestartPolicy.Delay,
Mode: *taskGroup.RestartPolicy.Mode,
Attempts: *taskGroup.RestartPolicy.Attempts,
Interval: *taskGroup.RestartPolicy.Interval,
Delay: *taskGroup.RestartPolicy.Delay,
Mode: *taskGroup.RestartPolicy.Mode,
RenderTemplates: *taskGroup.RestartPolicy.RenderTemplates,
}
if taskGroup.ShutdownDelay != nil {
@ -1209,10 +1210,11 @@ func ApiTaskToStructsTask(job *structs.Job, group *structs.TaskGroup,
if apiTask.RestartPolicy != nil {
structsTask.RestartPolicy = &structs.RestartPolicy{
Attempts: *apiTask.RestartPolicy.Attempts,
Interval: *apiTask.RestartPolicy.Interval,
Delay: *apiTask.RestartPolicy.Delay,
Mode: *apiTask.RestartPolicy.Mode,
Attempts: *apiTask.RestartPolicy.Attempts,
Interval: *apiTask.RestartPolicy.Interval,
Delay: *apiTask.RestartPolicy.Delay,
Mode: *apiTask.RestartPolicy.Mode,
RenderTemplates: *apiTask.RestartPolicy.RenderTemplates,
}
}

View File

@ -2518,10 +2518,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &api.RestartPolicy{
Interval: pointer.Of(1 * time.Second),
Attempts: pointer.Of(5),
Delay: pointer.Of(10 * time.Second),
Mode: pointer.Of("delay"),
Interval: pointer.Of(1 * time.Second),
Attempts: pointer.Of(5),
Delay: pointer.Of(10 * time.Second),
Mode: pointer.Of("delay"),
RenderTemplates: pointer.Of(false),
},
ReschedulePolicy: &api.ReschedulePolicy{
Interval: pointer.Of(12 * time.Hour),
@ -2661,10 +2662,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &api.RestartPolicy{
Interval: pointer.Of(2 * time.Second),
Attempts: pointer.Of(10),
Delay: pointer.Of(20 * time.Second),
Mode: pointer.Of("delay"),
Interval: pointer.Of(2 * time.Second),
Attempts: pointer.Of(10),
Delay: pointer.Of(20 * time.Second),
Mode: pointer.Of("delay"),
RenderTemplates: pointer.Of(false),
},
Services: []*api.Service{
{
@ -2927,10 +2929,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
RenderTemplates: false,
},
Spreads: []*structs.Spread{
{
@ -3075,10 +3078,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 2 * time.Second,
Attempts: 10,
Delay: 20 * time.Second,
Mode: "delay",
Interval: 2 * time.Second,
Attempts: 10,
Delay: 20 * time.Second,
Mode: "delay",
RenderTemplates: false,
},
Services: []*structs.Service{
{
@ -3283,10 +3287,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &api.RestartPolicy{
Interval: pointer.Of(1 * time.Second),
Attempts: pointer.Of(5),
Delay: pointer.Of(10 * time.Second),
Mode: pointer.Of("delay"),
Interval: pointer.Of(1 * time.Second),
Attempts: pointer.Of(5),
Delay: pointer.Of(10 * time.Second),
Mode: pointer.Of("delay"),
RenderTemplates: pointer.Of(false),
},
EphemeralDisk: &api.EphemeralDisk{
SizeMB: pointer.Of(100),
@ -3404,10 +3409,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
RenderTemplates: false,
},
EphemeralDisk: &structs.EphemeralDisk{
SizeMB: 100,
@ -3462,10 +3468,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
RenderTemplates: false,
},
Meta: map[string]string{
"lol": "code",

View File

@ -35,10 +35,11 @@ func MockJob() *api.Job {
SizeMB: pointer.Of(150),
},
RestartPolicy: &api.RestartPolicy{
Attempts: pointer.Of(3),
Interval: pointer.Of(10 * time.Minute),
Delay: pointer.Of(1 * time.Minute),
Mode: pointer.Of("delay"),
Attempts: pointer.Of(3),
Interval: pointer.Of(10 * time.Minute),
Delay: pointer.Of(1 * time.Minute),
Mode: pointer.Of("delay"),
RenderTemplates: pointer.Of(false),
},
Networks: []*api.NetworkResource{
{

View File

@ -224,9 +224,10 @@ const (
resources {}
}
restart {
attempts = 10
mode = "delay"
interval = "15s"
attempts = 10
mode = "delay"
interval = "15s"
render_templates = false
}
}
}`
@ -243,9 +244,10 @@ var (
Name: pointer.Of("group1"),
Count: pointer.Of(1),
RestartPolicy: &api.RestartPolicy{
Attempts: pointer.Of(10),
Interval: pointer.Of(15 * time.Second),
Mode: pointer.Of("delay"),
Attempts: pointer.Of(10),
Interval: pointer.Of(15 * time.Second),
Mode: pointer.Of("delay"),
RenderTemplates: pointer.Of(false),
},
Tasks: []*api.Task{

View File

@ -321,6 +321,7 @@ func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error {
"interval",
"delay",
"mode",
"render_templates",
}
if err := checkHCLKeys(obj.Val, valid); err != nil {
return err

View File

@ -193,10 +193,11 @@ func TestParse(t *testing.T) {
"elb_checks": "3",
},
RestartPolicy: &api.RestartPolicy{
Interval: timeToPtr(10 * time.Minute),
Attempts: intToPtr(5),
Delay: timeToPtr(15 * time.Second),
Mode: stringToPtr("delay"),
Interval: timeToPtr(10 * time.Minute),
Attempts: intToPtr(5),
Delay: timeToPtr(15 * time.Second),
Mode: stringToPtr("delay"),
RenderTemplates: boolToPtr(false),
},
Spreads: []*api.Spread{
{

View File

@ -102,10 +102,11 @@ job "binstore-storagelocker" {
}
restart {
attempts = 5
interval = "10m"
delay = "15s"
mode = "delay"
attempts = 5
interval = "10m"
delay = "15s"
mode = "delay"
render_templates = false
}
reschedule {

View File

@ -1086,3 +1086,21 @@ func TestErrMissingKey(t *testing.T) {
require.NotNil(t, tmpl.ErrMissingKey)
require.True(t, *tmpl.ErrMissingKey)
}
func TestRestartRenderTemplates(t *testing.T) {
ci.Parallel(t)
hclBytes, err := os.ReadFile("test-fixtures/restart-render-templates.hcl")
require.NoError(t, err)
job, err := ParseWithConfig(&ParseConfig{
Path: "test-fixtures/restart-render-templates.hcl",
Body: hclBytes,
AllowFS: false,
})
require.NoError(t, err)
tg := job.TaskGroups[0]
require.NotNil(t, tg.RestartPolicy)
require.True(t, *tg.RestartPolicy.RenderTemplates)
require.Nil(t, tg.Tasks[0].RestartPolicy)
require.False(t, *tg.Tasks[1].RestartPolicy.RenderTemplates)
}

View File

@ -0,0 +1,17 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
job "example" {
group "group" {
restart {
render_templates = true
}
task "foo" {
}
task "bar" {
restart {
render_templates = false
}
}
}
}

View File

@ -44,10 +44,11 @@ func Job() *structs.Job {
SizeMB: 150,
},
RestartPolicy: &structs.RestartPolicy{
Attempts: 3,
Interval: 10 * time.Minute,
Delay: 1 * time.Minute,
Mode: structs.RestartPolicyModeDelay,
Attempts: 3,
Interval: 10 * time.Minute,
Delay: 1 * time.Minute,
Mode: structs.RestartPolicyModeDelay,
RenderTemplates: false,
},
ReschedulePolicy: &structs.ReschedulePolicy{
Attempts: 2,

View File

@ -1861,6 +1861,12 @@ func TestTaskGroupDiff(t *testing.T) {
Old: "",
New: "fail",
},
{
Type: DiffTypeAdded,
Name: "RenderTemplates",
Old: "",
New: "false",
},
},
},
},
@ -1908,6 +1914,12 @@ func TestTaskGroupDiff(t *testing.T) {
Old: "fail",
New: "",
},
{
Type: DiffTypeDeleted,
Name: "RenderTemplates",
Old: "false",
New: "",
},
},
},
},
@ -1925,10 +1937,11 @@ func TestTaskGroupDiff(t *testing.T) {
},
New: &TaskGroup{
RestartPolicy: &RestartPolicy{
Attempts: 2,
Interval: 2 * time.Second,
Delay: 2 * time.Second,
Mode: "delay",
Attempts: 2,
Interval: 2 * time.Second,
Delay: 2 * time.Second,
Mode: "delay",
RenderTemplates: true,
},
},
Expected: &TaskGroupDiff{
@ -1962,6 +1975,12 @@ func TestTaskGroupDiff(t *testing.T) {
Old: "fail",
New: "delay",
},
{
Type: DiffTypeEdited,
Name: "RenderTemplates",
Old: "false",
New: "true",
},
},
},
},
@ -1972,18 +1991,20 @@ func TestTaskGroupDiff(t *testing.T) {
Contextual: true,
Old: &TaskGroup{
RestartPolicy: &RestartPolicy{
Attempts: 1,
Interval: 1 * time.Second,
Delay: 1 * time.Second,
Mode: "fail",
Attempts: 1,
Interval: 1 * time.Second,
Delay: 1 * time.Second,
Mode: "fail",
RenderTemplates: false,
},
},
New: &TaskGroup{
RestartPolicy: &RestartPolicy{
Attempts: 2,
Interval: 2 * time.Second,
Delay: 1 * time.Second,
Mode: "fail",
Attempts: 2,
Interval: 2 * time.Second,
Delay: 1 * time.Second,
Mode: "fail",
RenderTemplates: true,
},
},
Expected: &TaskGroupDiff{
@ -2017,6 +2038,12 @@ func TestTaskGroupDiff(t *testing.T) {
Old: "fail",
New: "fail",
},
{
Type: DiffTypeEdited,
Name: "RenderTemplates",
Old: "false",
New: "true",
},
},
},
},

View File

@ -5865,16 +5865,18 @@ var (
// Canonicalize in api/tasks.go
DefaultServiceJobRestartPolicy = RestartPolicy{
Delay: 15 * time.Second,
Attempts: 2,
Interval: 30 * time.Minute,
Mode: RestartPolicyModeFail,
Delay: 15 * time.Second,
Attempts: 2,
Interval: 30 * time.Minute,
Mode: RestartPolicyModeFail,
RenderTemplates: false,
}
DefaultBatchJobRestartPolicy = RestartPolicy{
Delay: 15 * time.Second,
Attempts: 3,
Interval: 24 * time.Hour,
Mode: RestartPolicyModeFail,
Delay: 15 * time.Second,
Attempts: 3,
Interval: 24 * time.Hour,
Mode: RestartPolicyModeFail,
RenderTemplates: false,
}
)
@ -6207,6 +6209,9 @@ type RestartPolicy struct {
// Mode controls what happens when the task restarts more than attempt times
// in an interval.
Mode string
// RenderTemplates is a flag to explicitly re-render all templates on task restart
RenderTemplates bool
}
func (r *RestartPolicy) Copy() *RestartPolicy {

View File

@ -4307,9 +4307,10 @@ func TestRestartPolicy_Validate(t *testing.T) {
// Policy with acceptable restart options passes
p := &RestartPolicy{
Mode: RestartPolicyModeFail,
Attempts: 0,
Interval: 5 * time.Second,
Mode: RestartPolicyModeFail,
Attempts: 0,
Interval: 5 * time.Second,
RenderTemplates: true,
}
if err := p.Validate(); err != nil {
t.Fatalf("err: %v", err)

View File

@ -258,6 +258,12 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) comparison {
return difference("volume request", a.Volumes, b.Volumes)
}
// Check if restart.render_templates is updated;
// this requires a destructive update for the template hook to receive the new config.
if a.RestartPolicy.RenderTemplates != b.RestartPolicy.RenderTemplates {
return difference("group restart render_templates", a.RestartPolicy.RenderTemplates, b.RestartPolicy.RenderTemplates)
}
// Check each task
for _, at := range a.Tasks {
bt := b.LookupTask(at.Name)
@ -319,6 +325,11 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) comparison {
if at.LogConfig.Disabled != bt.LogConfig.Disabled {
return difference("task log disabled", at.LogConfig.Disabled, bt.LogConfig.Disabled)
}
// Check if restart.render_templates is updated
if at.RestartPolicy.RenderTemplates != bt.RestartPolicy.RenderTemplates {
return difference("task restart render_templates", at.RestartPolicy.RenderTemplates, bt.RestartPolicy.RenderTemplates)
}
}
// none of the fields that trigger a destructive update were modified,

View File

@ -1407,3 +1407,19 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
expected = []string{}
require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
}
func TestTaskGroupUpdated_Restart(t *testing.T) {
ci.Parallel(t)
j1 := mock.Job()
name := j1.TaskGroups[0].Name
j2 := j1.Copy()
j3 := j1.Copy()
must.False(t, tasksUpdated(j1, j2, name).modified)
j2.TaskGroups[0].RestartPolicy.RenderTemplates = true
must.True(t, tasksUpdated(j1, j2, name).modified)
j3.TaskGroups[0].Tasks[0].RestartPolicy.RenderTemplates = true
must.True(t, tasksUpdated(j1, j3, name).modified)
}

View File

@ -36,10 +36,11 @@ For example, assuming that the task group restart policy is:
```hcl
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "fail"
interval = "30m"
attempts = 2
delay = "15s"
mode = "fail"
render_templates = true
}
```
@ -55,10 +56,11 @@ then the effective restart policy for the task will be:
```hcl
restart {
interval = "30m"
attempts = 5
delay = "15s"
mode = "fail"
interval = "30m"
attempts = 5
delay = "15s"
mode = "fail"
render_templates = true
}
```
@ -86,6 +88,11 @@ level, so that the Connect sidecar can inherit the default `restart`.
than `attempts` times in an interval. For a detailed explanation of these
values and their behavior, please see the [mode values section](#mode-values).
- `render_templates` `(bool: false)` - Specifies whether to re-render all of the
task's templates when the task is restarted. This can be useful for re-fetching
Vault secrets, even if the lease on the existing secrets has not yet expired.
### `restart` Parameter Defaults
The values for many of the `restart` parameters vary by job type. Here are the
@ -95,10 +102,11 @@ defaults by job type:
```hcl
restart {
attempts = 3
delay = "15s"
interval = "24h"
mode = "fail"
attempts = 3
delay = "15s"
interval = "24h"
mode = "fail"
render_templates = false
}
```
@ -106,10 +114,11 @@ defaults by job type:
```hcl
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "fail"
interval = "30m"
attempts = 2
delay = "15s"
mode = "fail"
render_templates = false
}
```