package consul

import (
	"context"
	"fmt"
	"time"

	log "github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/nomad/structs"
)

const (
	// defaultPollFreq is the default rate to poll the Consul Checks API
	defaultPollFreq = 900 * time.Millisecond
)

// ChecksAPI is the part of the Consul API the checkWatcher requires.
type ChecksAPI interface {
	// Checks returns all checks registered with the local Consul agent,
	// keyed by check ID.
	Checks() (map[string]*api.AgentCheck, error)
}
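
// The Consul API client's Agent endpoint satisfies this interface as-is; a
// minimal sketch, assuming a default client configuration:
//
//	client, err := api.NewClient(api.DefaultConfig())
//	if err != nil {
//		return err
//	}
//	var checks ChecksAPI = client.Agent() // *api.Agent provides Checks()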

// TaskRestarter allows the checkWatcher to restart tasks.
type TaskRestarter interface {
	Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error
}
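
// In Nomad the task runner provides the production implementation; for unit
// tests a hand-rolled stub is enough. A hypothetical sketch (fakeRestarter is
// not part of this package):
//
//	type fakeRestarter struct{ restarts int }
//
//	func (f *fakeRestarter) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error {
//		f.restarts++
//		return nil
//	}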

// checkRestart handles restarting a task if a check is unhealthy.
type checkRestart struct {
	allocID   string
	taskName  string
	checkID   string
	checkName string
	taskKey   string // composite of allocID + taskName for uniqueness

	task           TaskRestarter
	grace          time.Duration
	interval       time.Duration
	timeLimit      time.Duration
	ignoreWarnings bool

	// Mutable fields

	// unhealthyState is the time a check first went unhealthy. Set to the
	// zero value if the check passes before timeLimit.
	unhealthyState time.Time

	// graceUntil is when the check's grace period expires and unhealthy
	// checks should be counted.
	graceUntil time.Time

	logger log.Logger
}

// apply restart state for a check and restart the task if necessary. The
// current timestamp is passed in so all check updates have the same view of
// time (and to ease testing).
//
// Returns true if a restart was triggered, in which case this check should be
// removed (checks are added on task startup).
func (c *checkRestart) apply(ctx context.Context, now time.Time, status string) bool {
	healthy := func() {
		if !c.unhealthyState.IsZero() {
			c.logger.Debug("canceling restart because check became healthy")
			c.unhealthyState = time.Time{}
		}
	}
	switch status {
	case api.HealthCritical:
	case api.HealthWarning:
		if c.ignoreWarnings {
			// Warnings are ignored, reset state and exit
			healthy()
			return false
		}
	default:
		// All other statuses are ok, reset state and exit
		healthy()
		return false
	}

	if now.Before(c.graceUntil) {
		// In grace period, exit
		return false
	}

	if c.unhealthyState.IsZero() {
		// First failure, set restart deadline
		if c.timeLimit != 0 {
			c.logger.Debug("check became unhealthy. Will restart if check doesn't become healthy", "time_limit", c.timeLimit)
		}
		c.unhealthyState = now
	}

	// restart timeLimit after start of this check becoming unhealthy
	restartAt := c.unhealthyState.Add(c.timeLimit)

	// Must test >= because if limit=1, restartAt == first failure
	if now.Equal(restartAt) || now.After(restartAt) {
		// hasn't become healthy by deadline, restart!
		c.logger.Debug("restarting due to unhealthy check")

		// Tell TaskRunner to restart due to failure
		reason := fmt.Sprintf("healthcheck: check %q unhealthy", c.checkName)
		event := structs.NewTaskEvent(structs.TaskRestartSignal).SetRestartReason(reason)
		go asyncRestart(ctx, c.logger, c.task, event)
		return true
	}

	return false
}
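
// A worked example of the deadline math above, assuming a check defined with
// interval=10s, grace=5s, and limit=3 (Watch below derives
// timeLimit = interval * (limit-1) = 20s): if the check turns critical at
// t=30s, unhealthyState is set to 30s and restartAt becomes 50s; a poll at
// t>=50s with the check still critical triggers the restart, while any
// passing result before then resets unhealthyState to the zero value.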

// asyncRestart mimics the pre-0.9 TaskRunner.Restart behavior and is intended
// to be called in a goroutine.
func asyncRestart(ctx context.Context, logger log.Logger, task TaskRestarter, event *structs.TaskEvent) {
	// Check watcher restarts are always failures
	const failure = true

	// Restarting is asynchronous so there's no reason to allow this
	// goroutine to block indefinitely.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	if err := task.Restart(ctx, event, failure); err != nil {
		// Restart errors are not actionable and only relevant when
		// debugging allocation lifecycle management.
		logger.Debug("error restarting task", "error", err,
			"event_time", event.Time, "event_type", event.Type)
	}
}

// checkWatchUpdate adds or removes a check from the watcher.
type checkWatchUpdate struct {
	checkID      string
	remove       bool
	checkRestart *checkRestart
}

// checkWatcher watches Consul checks and restarts tasks when they're
// unhealthy.
type checkWatcher struct {
	consul ChecksAPI

	// pollFreq is how often to poll the checks API and defaults to
	// defaultPollFreq
	pollFreq time.Duration

	// checkUpdateCh is how watches (and removals) are sent to the main
	// watching loop
	checkUpdateCh chan checkWatchUpdate

	// done is closed when Run has exited
	done chan struct{}

	// lastErr is true if the last Consul call failed. It is used to
	// squelch repeated error messages.
	lastErr bool

	logger log.Logger
}

// newCheckWatcher creates a new checkWatcher but does not call its Run method.
func newCheckWatcher(logger log.Logger, consul ChecksAPI) *checkWatcher {
	return &checkWatcher{
		consul:        consul,
		pollFreq:      defaultPollFreq,
		checkUpdateCh: make(chan checkWatchUpdate, 8),
		done:          make(chan struct{}),
		logger:        logger.ResetNamed("consul.health"),
	}
}
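
// Typical wiring, as a minimal sketch (logger, ctx, and the Watch arguments
// are assumed to exist in the caller):
//
//	watcher := newCheckWatcher(logger, consulClient.Agent())
//	go watcher.Run(ctx)
//	watcher.Watch(allocID, taskName, checkID, serviceCheck, restarter)
//	// ...
//	watcher.Unwatch(checkID)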

// Run the main Consul checks watching loop to restart tasks when their checks
// fail. Blocks until context is canceled.
func (w *checkWatcher) Run(ctx context.Context) {
	defer close(w.done)

	// map of check IDs to their metadata
	checks := map[string]*checkRestart{}

	// timer for check polling
	checkTimer := time.NewTimer(0)
	defer checkTimer.Stop() // ensure timer is never leaked

	stopTimer := func() {
		checkTimer.Stop()
		select {
		case <-checkTimer.C:
		default:
		}
	}
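
	// Note: time.Timer.Stop does not drain the timer's channel, so
	// stopTimer drains any stale tick after stopping; otherwise a later
	// Reset could appear to fire immediately. This is the standard
	// timer-reuse idiom.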

	// disable by default
	stopTimer()

	// Main watch loop
	for {
		// disable polling if there are no checks
		if len(checks) == 0 {
			stopTimer()
		}

		select {
		case update := <-w.checkUpdateCh:
			if update.remove {
				// Remove a check
				delete(checks, update.checkID)
				continue
			}

			// Add/update a check
			checks[update.checkID] = update.checkRestart
			w.logger.Debug("watching check", "alloc_id", update.checkRestart.allocID,
				"task", update.checkRestart.taskName, "check", update.checkRestart.checkName)

			// if the first check was added make sure polling is enabled
			if len(checks) == 1 {
				stopTimer()
				checkTimer.Reset(w.pollFreq)
			}

		case <-ctx.Done():
			return

		case <-checkTimer.C:
			checkTimer.Reset(w.pollFreq)

			// Set "now" as the point in time the following check results represent
			now := time.Now()

			results, err := w.consul.Checks()
			if err != nil {
				if !w.lastErr {
					w.lastErr = true
					w.logger.Error("failed retrieving health checks", "error", err)
				}
				continue
			}

			w.lastErr = false

			// Keep track of tasks restarted this period so they
			// are only restarted once and all of their checks are
			// removed.
			restartedTasks := map[string]struct{}{}

			// Loop over watched checks and update their status from results
			for cid, check := range checks {
				// Shortcircuit if told to exit
				if ctx.Err() != nil {
					return
				}

				if _, ok := restartedTasks[check.taskKey]; ok {
					// Task already restarted this period; remove and skip check
					delete(checks, cid)
					continue
				}

				result, ok := results[cid]
				if !ok {
					// Only warn if outside grace period to avoid races with check registration
					if now.After(check.graceUntil) {
						w.logger.Warn("watched check not found in Consul", "check", check.checkName, "check_id", cid)
					}
					continue
				}

				restarted := check.apply(ctx, now, result.Status)
				if restarted {
					// Checks are registered+watched on
					// startup, so it's safe to remove them
					// whenever they're restarted
					delete(checks, cid)

					restartedTasks[check.taskKey] = struct{}{}
				}
			}

			// Ensure even passing checks for restartedTasks are removed
			if len(restartedTasks) > 0 {
				for cid, check := range checks {
					if _, ok := restartedTasks[check.taskKey]; ok {
						delete(checks, cid)
					}
				}
			}
		}
	}
}

// Watch a check and restart its task if unhealthy.
func (w *checkWatcher) Watch(allocID, taskName, checkID string, check *structs.ServiceCheck, restarter TaskRestarter) {
	if !check.TriggersRestarts() {
		// Not watched, noop
		return
	}

	c := &checkRestart{
		allocID:        allocID,
		taskName:       taskName,
		checkID:        checkID,
		checkName:      check.Name,
		taskKey:        fmt.Sprintf("%s%s", allocID, taskName), // unique task ID
		task:           restarter,
		interval:       check.Interval,
		grace:          check.CheckRestart.Grace,
		graceUntil:     time.Now().Add(check.CheckRestart.Grace),
		timeLimit:      check.Interval * time.Duration(check.CheckRestart.Limit-1),
		ignoreWarnings: check.CheckRestart.IgnoreWarnings,
		logger:         w.logger.With("alloc_id", allocID, "task", taskName, "check", check.Name),
	}

	update := checkWatchUpdate{
		checkID:      checkID,
		checkRestart: c,
	}

	select {
	case w.checkUpdateCh <- update:
		// sent watch
	case <-w.done:
		// exited; nothing to do
	}
}

// Unwatch a check.
func (w *checkWatcher) Unwatch(cid string) {
	c := checkWatchUpdate{
		checkID: cid,
		remove:  true,
	}
	select {
	case w.checkUpdateCh <- c:
		// sent remove watch
	case <-w.done:
		// exited; nothing to do
	}
}