Rename `on_warning=false` to `ignore_warnings=false`

Treat warnings as unhealthy by default
This commit is contained in:
Michael Schurter 2017-09-10 17:00:25 -07:00
parent 8a87475498
commit a180c00fc3
5 changed files with 37 additions and 35 deletions

View File

@ -82,9 +82,9 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
Limit int `mapstructure:"limit"`
Grace time.Duration `mapstructure:"grace_period"`
OnWarning bool `mapstructure:"on_warning"`
Limit int `mapstructure:"limit"`
Grace time.Duration `mapstructure:"grace_period"`
IgnoreWarnings bool `mapstructure:"ignore_warnings"`
}
// The ServiceCheck data model represents the consul health check that

View File

@ -34,12 +34,12 @@ type checkRestart struct {
// remove this checkID (if true only checkID will be set)
remove bool
task TaskRestarter
restartDelay time.Duration
grace time.Duration
interval time.Duration
timeLimit time.Duration
warning bool
task TaskRestarter
restartDelay time.Duration
grace time.Duration
interval time.Duration
timeLimit time.Duration
ignoreWarnings bool
// Mutable fields
@ -61,8 +61,8 @@ func (c *checkRestart) update(now time.Time, status string) {
switch status {
case api.HealthCritical:
case api.HealthWarning:
if !c.warning {
// Warnings are ok, reset state and exit
if c.ignoreWarnings {
// Warnings are ignored, reset state and exit
c.unhealthyStart = time.Time{}
return
}
@ -79,6 +79,8 @@ func (c *checkRestart) update(now time.Time, status string) {
if c.unhealthyStart.IsZero() {
// First failure, set restart deadline
c.logger.Printf("[DEBUG] consul.health: alloc %q task %q check %q became unhealthy. Restarting in %s if not healthy",
c.allocID, c.taskName, c.checkName, c.timeLimit)
c.unhealthyStart = now
}
@ -224,18 +226,18 @@ func (w *checkWatcher) Watch(allocID, taskName, checkID string, check *structs.S
}
c := checkRestart{
allocID: allocID,
taskName: taskName,
checkID: checkID,
checkName: check.Name,
task: restarter,
restartDelay: restarter.RestartDelay(),
interval: check.Interval,
grace: check.CheckRestart.Grace,
graceUntil: time.Now().Add(check.CheckRestart.Grace),
timeLimit: check.Interval * time.Duration(check.CheckRestart.Limit-1),
warning: check.CheckRestart.OnWarning,
logger: w.logger,
allocID: allocID,
taskName: taskName,
checkID: checkID,
checkName: check.Name,
task: restarter,
restartDelay: restarter.RestartDelay(),
interval: check.Interval,
grace: check.CheckRestart.Grace,
graceUntil: time.Now().Add(check.CheckRestart.Grace),
timeLimit: check.Interval * time.Duration(check.CheckRestart.Limit-1),
ignoreWarnings: check.CheckRestart.IgnoreWarnings,
logger: w.logger,
}
select {

View File

@ -687,9 +687,9 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
}
if service.CheckRestart != nil {
structsTask.Services[i].CheckRestart = &structs.CheckRestart{
Limit: service.CheckRestart.Limit,
Grace: service.CheckRestart.Grace,
OnWarning: service.CheckRestart.OnWarning,
Limit: service.CheckRestart.Limit,
Grace: service.CheckRestart.Grace,
IgnoreWarnings: service.CheckRestart.IgnoreWarnings,
}
}
@ -713,9 +713,9 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
}
if check.CheckRestart != nil {
structsTask.Services[i].Checks[j].CheckRestart = &structs.CheckRestart{
Limit: check.CheckRestart.Limit,
Grace: check.CheckRestart.Grace,
OnWarning: check.CheckRestart.OnWarning,
Limit: check.CheckRestart.Limit,
Grace: check.CheckRestart.Grace,
IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
}
}
}

View File

@ -1063,7 +1063,7 @@ func parseCheckRestart(cro *ast.ObjectItem) (*api.CheckRestart, error) {
valid := []string{
"limit",
"grace_period",
"on_warning",
"ignore_warnings",
}
if err := checkHCLKeys(cro.Val, valid); err != nil {

View File

@ -2760,9 +2760,9 @@ func (tg *TaskGroup) GoString() string {
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
Limit int // Restart task after this many unhealthy intervals
Grace time.Duration // Grace time to give tasks after starting to get healthy
OnWarning bool // If true treat checks in `warning` as unhealthy
Limit int // Restart task after this many unhealthy intervals
Grace time.Duration // Grace time to give tasks after starting to get healthy
IgnoreWarnings bool // If true treat checks in `warning` as passing
}
func (c *CheckRestart) Copy() *CheckRestart {
@ -2798,8 +2798,8 @@ func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
nc.Grace = o.Grace
}
if !nc.OnWarning {
nc.OnWarning = o.OnWarning
if nc.IgnoreWarnings {
nc.IgnoreWarnings = o.IgnoreWarnings
}
return nc