package client

import (
	"math/rand"
	"sync"
	"time"

	cstructs "github.com/hashicorp/nomad/client/driver/structs"
	"github.com/hashicorp/nomad/nomad/structs"
)

// jitter is the percent of jitter added to restart delays.
const jitter = 0.25

// newRestartTracker returns a tracker for the given restart policy. Batch
// jobs are not restarted after a successful exit; all other job types are.
func newRestartTracker(policy *structs.RestartPolicy, jobType string) *RestartTracker {
	onSuccess := true
	if jobType == structs.JobTypeBatch {
		onSuccess = false
	}
	return &RestartTracker{
		startTime: time.Now(),
		onSuccess: onSuccess,
		policy:    policy,
		rand:      rand.New(rand.NewSource(time.Now().Unix())),
	}
}

// RestartTracker tracks a task's restart attempts within the policy's
// interval and computes the task's next state and restart delay.
type RestartTracker struct {
	waitRes   *cstructs.WaitResult
	startErr  error
	count     int       // Current number of attempts.
	onSuccess bool      // Whether to restart on successful exit code.
	startTime time.Time // When the interval began.
	policy    *structs.RestartPolicy
	rand      *rand.Rand
	lock      sync.Mutex
}

// SetPolicy updates the policy used to determine restarts.
func (r *RestartTracker) SetPolicy(policy *structs.RestartPolicy) {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.policy = policy
}

// SetStartError is used to mark the most recent start error. If starting was
// successful the error should be nil.
func (r *RestartTracker) SetStartError(err error) *RestartTracker {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.startErr = err
	return r
}

// SetWaitResult is used to mark the most recent wait result.
func (r *RestartTracker) SetWaitResult(res *cstructs.WaitResult) *RestartTracker {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.waitRes = res
	return r
}

// GetState returns the task's next state given the set exit code and start
// error. One of the following states is returned:
// * TaskRestarting - Task should be restarted
// * TaskNotRestarting - Task should not be restarted and has exceeded its
//   restart policy.
// * TaskTerminated - Task has terminated successfully and does not need a
//   restart.
//
// If TaskRestarting is returned, the duration is how long to wait until
// starting the task again.
func (r *RestartTracker) GetState() (string, time.Duration) {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Hot path if no attempts are expected
	if r.policy.Attempts == 0 {
		if r.waitRes != nil && r.waitRes.Successful() {
			return structs.TaskTerminated, 0
		}

		return structs.TaskNotRestarting, 0
	}

	r.count++

	// Check if we have entered a new interval.
	end := r.startTime.Add(r.policy.Interval)
	now := time.Now()
	if now.After(end) {
		r.count = 0
		r.startTime = now
	}

	if r.startErr != nil {
		return r.handleStartError()
	} else if r.waitRes != nil {
		return r.handleWaitResult()
	} else {
		return "", 0
	}
}
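
// Illustrative usage sketch (not part of the original file): a caller such as
// a task runner might record the latest wait result or start error and then
// ask the tracker for the next state and delay. The policy field values and
// the JobTypeService/RestartPolicyModeDelay constants below are assumptions
// made for the example, not taken from this file.
//
//	policy := &structs.RestartPolicy{
//		Attempts: 2,
//		Interval: 1 * time.Minute,
//		Delay:    15 * time.Second,
//		Mode:     structs.RestartPolicyModeDelay,
//	}
//	rt := newRestartTracker(policy, structs.JobTypeService)
//
//	// After the task exits with *cstructs.WaitResult res:
//	state, wait := rt.SetWaitResult(res).GetState()
//	if state == structs.TaskRestarting {
//		time.Sleep(wait)
//		// ...start the task again...
//	}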

// handleStartError returns the new state and potential wait duration for
// restarting the task after it was not successfully started. On start errors,
// the restart policy is always treated as fail mode to ensure we don't
// infinitely try to start a task.
func (r *RestartTracker) handleStartError() (string, time.Duration) {
	// If the error is not recoverable, do not restart.
	if rerr, ok := r.startErr.(*cstructs.RecoverableError); !(ok && rerr.Recoverable) {
		return structs.TaskNotRestarting, 0
	}

	if r.count > r.policy.Attempts {
		return structs.TaskNotRestarting, 0
	}

	return structs.TaskRestarting, r.jitter()
}

// handleWaitResult returns the new state and potential wait duration for
// restarting the task after it has exited.
func (r *RestartTracker) handleWaitResult() (string, time.Duration) {
	// If the task started successfully and restart on success isn't specified,
	// don't restart but don't mark as failed.
	if r.waitRes.Successful() && !r.onSuccess {
		return structs.TaskTerminated, 0
	}

	if r.count > r.policy.Attempts {
		if r.policy.Mode == structs.RestartPolicyModeFail {
			return structs.TaskNotRestarting, 0
		} else {
			return structs.TaskRestarting, r.getDelay()
		}
	}

	return structs.TaskRestarting, r.jitter()
}

// getDelay returns the delay time to enter the next interval.
func (r *RestartTracker) getDelay() time.Duration {
	end := r.startTime.Add(r.policy.Interval)
	now := time.Now()
	return end.Sub(now)
}

// jitter returns the delay time plus a jitter.
func (r *RestartTracker) jitter() time.Duration {
	// Get the delay and ensure it is valid.
	d := r.policy.Delay.Nanoseconds()
	if d == 0 {
		d = 1
	}

	j := float64(r.rand.Int63n(d)) * jitter
	return time.Duration(d + int64(j))
}
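
// As a worked example of the calculation above: with jitter = 0.25 and a
// policy Delay of 15s, d is 15s in nanoseconds and j is drawn uniformly from
// [0, 0.25*d), so the returned duration falls in the range [15s, 18.75s).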

// noRestartsTracker returns a tracker that never restarts.
func noRestartsTracker() *RestartTracker {
	policy := &structs.RestartPolicy{Attempts: 0, Mode: structs.RestartPolicyModeFail}
	return newRestartTracker(policy, structs.JobTypeBatch)
}