package deploymentwatcher

import (
	"context"
	"log"
	"sync"
	"time"

	"golang.org/x/time/rate"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

const (
	// perJobEvalBatchPeriod is the batching length before creating an evaluation to
	// trigger the scheduler when allocations are marked as healthy.
	perJobEvalBatchPeriod = 1 * time.Second
)

// deploymentTriggers are the set of functions required to trigger changes on
// behalf of a deployment
type deploymentTriggers interface {
	// createEvaluation is used to create an evaluation.
	createEvaluation(eval *structs.Evaluation) (uint64, error)

	// upsertJob is used to roll back a job when autoreverting for a deployment
	upsertJob(job *structs.Job) (uint64, error)

	// upsertDeploymentStatusUpdate is used to upsert a deployment status update
	// and an optional evaluation and job to upsert
	upsertDeploymentStatusUpdate(u *structs.DeploymentStatusUpdate, eval *structs.Evaluation, job *structs.Job) (uint64, error)

	// upsertDeploymentPromotion is used to promote canaries in a deployment
	upsertDeploymentPromotion(req *structs.ApplyDeploymentPromoteRequest) (uint64, error)

	// upsertDeploymentAllocHealth is used to set the health of allocations in a
	// deployment
	upsertDeploymentAllocHealth(req *structs.ApplyDeploymentAllocHealthRequest) (uint64, error)
}

// deploymentWatcher is used to watch a single deployment and trigger the
// scheduler when allocation health transitions.
type deploymentWatcher struct {
	// queryLimiter is used to limit the rate of blocking queries
	queryLimiter *rate.Limiter

	// deploymentTriggers holds the methods required to trigger changes on behalf of the
	// deployment
	deploymentTriggers

	// state is the state that is watched for state changes.
	state *state.StateStore

	// d is the deployment being watched
	d *structs.Deployment

	// j is the job the deployment is for
	j *structs.Job

	// outstandingBatch marks whether an outstanding function exists to create
	// the evaluation. Access should be done through the lock
	outstandingBatch bool

	// latestEval is the latest eval for the job. It is updated by the watch
	// loop and any time an evaluation is created. The field should be accessed
	// by holding the lock or using the setter and getter methods.
	latestEval uint64

	logger *log.Logger
	ctx    context.Context
	exitFn context.CancelFunc
	l      sync.RWMutex
}

// newDeploymentWatcher returns a deployment watcher that is used to watch
// deployments and trigger the scheduler as needed.
func newDeploymentWatcher(parent context.Context, queryLimiter *rate.Limiter,
	logger *log.Logger, state *state.StateStore, d *structs.Deployment,
	j *structs.Job, triggers deploymentTriggers) *deploymentWatcher {

	ctx, exitFn := context.WithCancel(parent)
	w := &deploymentWatcher{
		queryLimiter:       queryLimiter,
		d:                  d,
		j:                  j,
		state:              state,
		deploymentTriggers: triggers,
		logger:             logger,
		ctx:                ctx,
		exitFn:             exitFn,
	}

	// Start the long lived watcher that scans for allocation updates
	go w.watch()

	return w
}

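// SetAllocHealth marks allocations in the watched deployment as healthy or
// unhealthy. If unhealthy allocations are reported, the deployment is failed
// and, when the affected task group has auto-revert enabled, the latest
// stable job is rolled back to.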
func (w *deploymentWatcher) SetAllocHealth(
	req *structs.DeploymentAllocHealthRequest,
	resp *structs.DeploymentUpdateResponse) error {

	// If we are failing the deployment, update the status and potentially
	// roll back
	var j *structs.Job
	var u *structs.DeploymentStatusUpdate

	// If there are unhealthy allocations we need to mark the deployment as
	// failed and check if we should roll back to a stable job.
	if l := len(req.UnhealthyAllocationIDs); l != 0 {
		unhealthy := make(map[string]struct{}, l)
		for _, alloc := range req.UnhealthyAllocationIDs {
			unhealthy[alloc] = struct{}{}
		}

		// Get the allocations for the deployment
		snap, err := w.state.Snapshot()
		if err != nil {
			return err
		}

		allocs, err := snap.AllocsByDeployment(nil, req.DeploymentID)
		if err != nil {
			return err
		}

		// Determine if we should autorevert to an older job
		desc := structs.DeploymentStatusDescriptionFailedAllocations
		for _, alloc := range allocs {
			// Check that the alloc has been marked unhealthy
			if _, ok := unhealthy[alloc.ID]; !ok {
				continue
			}

			// Check if the group has autorevert set
			group, ok := w.d.TaskGroups[alloc.TaskGroup]
			if !ok || !group.AutoRevert {
				continue
			}

			var err error
			j, err = w.latestStableJob()
			if err != nil {
				return err
			}

			if j != nil {
				j, desc = w.handleRollbackValidity(j, desc)
			}
			break
		}

		u = w.getDeploymentStatusUpdate(structs.DeploymentStatusFailed, desc)
	}

	// Canonicalize the job in case it doesn't have namespace set
	j.Canonicalize()

	// Create the request
	areq := &structs.ApplyDeploymentAllocHealthRequest{
		DeploymentAllocHealthRequest: *req,
		Eval:             w.getEval(),
		DeploymentUpdate: u,
		Job:              j,
	}

	index, err := w.upsertDeploymentAllocHealth(areq)
	if err != nil {
		return err
	}

	// Build the response
	resp.EvalID = areq.Eval.ID
	resp.EvalCreateIndex = index
	resp.DeploymentModifyIndex = index
	resp.Index = index
	if j != nil {
		resp.RevertedJobVersion = helper.Uint64ToPtr(j.Version)
	}
	w.setLatestEval(index)
	return nil
}

// handleRollbackValidity checks if the job being rolled back to has the same
// spec as the existing job. It returns a modified job and description accordingly.
func (w *deploymentWatcher) handleRollbackValidity(rollbackJob *structs.Job, desc string) (*structs.Job, string) {
	// Only roll back if the job being changed has a different spec.
	// This prevents an infinite revert cycle when a previously stable version of the job fails to start up during a rollback.
	// If the job we are trying to roll back to is identical to the current job, we stop because the rollback will not succeed.
	if w.j.SpecChanged(rollbackJob) {
		desc = structs.DeploymentStatusDescriptionRollback(desc, rollbackJob.Version)
	} else {
		desc = structs.DeploymentStatusDescriptionRollbackNoop(desc, rollbackJob.Version)
		rollbackJob = nil
	}
	return rollbackJob, desc
}

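// PromoteDeployment promotes the canaries in the watched deployment and
// creates an evaluation to trigger the scheduler.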
func (w *deploymentWatcher) PromoteDeployment(
	req *structs.DeploymentPromoteRequest,
	resp *structs.DeploymentUpdateResponse) error {

	// Create the request
	areq := &structs.ApplyDeploymentPromoteRequest{
		DeploymentPromoteRequest: *req,
		Eval: w.getEval(),
	}

	index, err := w.upsertDeploymentPromotion(areq)
	if err != nil {
		return err
	}

	// Build the response
	resp.EvalID = areq.Eval.ID
	resp.EvalCreateIndex = index
	resp.DeploymentModifyIndex = index
	resp.Index = index
	w.setLatestEval(index)
	return nil
}

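// PauseDeployment pauses or unpauses the watched deployment. Unpausing also
// creates an evaluation so the scheduler can resume the deployment.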
func (w *deploymentWatcher) PauseDeployment(
	req *structs.DeploymentPauseRequest,
	resp *structs.DeploymentUpdateResponse) error {
	// Determine the status we should transition to and if we need to create an
	// evaluation
	status, desc := structs.DeploymentStatusPaused, structs.DeploymentStatusDescriptionPaused
	var eval *structs.Evaluation
	evalID := ""
	if !req.Pause {
		status, desc = structs.DeploymentStatusRunning, structs.DeploymentStatusDescriptionRunning
		eval = w.getEval()
		evalID = eval.ID
	}
	update := w.getDeploymentStatusUpdate(status, desc)

	// Commit the change
	i, err := w.upsertDeploymentStatusUpdate(update, eval, nil)
	if err != nil {
		return err
	}

	// Build the response
	if evalID != "" {
		resp.EvalID = evalID
		resp.EvalCreateIndex = i
	}
	resp.DeploymentModifyIndex = i
	resp.Index = i
	w.setLatestEval(i)
	return nil
}

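// FailDeployment marks the watched deployment as failed by the user and, if
// any task group has auto-revert enabled, rolls back to the latest stable job.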
func (w *deploymentWatcher) FailDeployment(
	req *structs.DeploymentFailRequest,
	resp *structs.DeploymentUpdateResponse) error {

	status, desc := structs.DeploymentStatusFailed, structs.DeploymentStatusDescriptionFailedByUser

	// Determine if we should roll back
	rollback := false
	for _, state := range w.d.TaskGroups {
		if state.AutoRevert {
			rollback = true
			break
		}
	}

	var rollbackJob *structs.Job
	if rollback {
		var err error
		rollbackJob, err = w.latestStableJob()
		if err != nil {
			return err
		}

		if rollbackJob != nil {
			rollbackJob, desc = w.handleRollbackValidity(rollbackJob, desc)
		} else {
			desc = structs.DeploymentStatusDescriptionNoRollbackTarget(desc)
		}
	}

	// Commit the change
	update := w.getDeploymentStatusUpdate(status, desc)
	eval := w.getEval()
	i, err := w.upsertDeploymentStatusUpdate(update, eval, rollbackJob)
	if err != nil {
		return err
	}

	// Build the response
	resp.EvalID = eval.ID
	resp.EvalCreateIndex = i
	resp.DeploymentModifyIndex = i
	resp.Index = i
	if rollbackJob != nil {
		resp.RevertedJobVersion = helper.Uint64ToPtr(rollbackJob.Version)
	}
	w.setLatestEval(i)
	return nil
}

// StopWatch stops watching the deployment. This should be called whenever a
// deployment is completed or the watcher is no longer needed.
func (w *deploymentWatcher) StopWatch() {
	w.exitFn()
}

// watch is the long running watcher that takes actions upon allocation changes
func (w *deploymentWatcher) watch() {
	allocIndex := uint64(1)
	for {
		// Block getting all allocations that are part of the deployment using
		// the last evaluation index. This will have us block waiting for
		// something to change past what the scheduler has evaluated.
		allocs, index, err := w.getAllocs(allocIndex)
		if err != nil {
			if err == context.Canceled || w.ctx.Err() == context.Canceled {
				return
			}

			w.logger.Printf("[ERR] nomad.deployment_watcher: failed to retrieve allocations for deployment %q: %v", w.d.ID, err)
			return
		}
		allocIndex = index

		// Get the latest evaluation index
		latestEval, err := w.latestEvalIndex()
		if err != nil {
			if err == context.Canceled || w.ctx.Err() == context.Canceled {
				return
			}

			w.logger.Printf("[ERR] nomad.deployment_watcher: failed to determine last evaluation index for job %q: %v", w.d.JobID, err)
			return
		}

		// Create an evaluation trigger if there is any allocation whose
		// deployment status has been updated past the latest eval index.
		createEval, failDeployment, rollback := false, false, false
		for _, alloc := range allocs {
			if alloc.DeploymentStatus == nil || alloc.DeploymentStatus.ModifyIndex <= latestEval {
				continue
			}

			// We need to create an eval
			createEval = true

			if alloc.DeploymentStatus.IsUnhealthy() {
				// Check if the group has autorevert set
				group, ok := w.d.TaskGroups[alloc.TaskGroup]
				if ok && group.AutoRevert {
					rollback = true
				}

				// Since we have an unhealthy allocation, fail the deployment
				failDeployment = true
			}

			// All conditions have been hit so we can break
			if createEval && failDeployment && rollback {
				break
			}
		}

		// Change the deployment's status to failed
		if failDeployment {
			// Default description
			desc := structs.DeploymentStatusDescriptionFailedAllocations

			// Roll back to the old job if necessary
			var j *structs.Job
			if rollback {
				var err error
				j, err = w.latestStableJob()
				if err != nil {
					w.logger.Printf("[ERR] nomad.deployment_watcher: failed to lookup latest stable job for %q: %v", w.d.JobID, err)
				}

				// Description should include that the job is being rolled back to
				// version N
				if j != nil {
					j, desc = w.handleRollbackValidity(j, desc)
				} else {
					desc = structs.DeploymentStatusDescriptionNoRollbackTarget(desc)
				}
			}

			// Update the status of the deployment to failed and create an
			// evaluation.
			e := w.getEval()
			u := w.getDeploymentStatusUpdate(structs.DeploymentStatusFailed, desc)
			if index, err := w.upsertDeploymentStatusUpdate(u, e, j); err != nil {
				w.logger.Printf("[ERR] nomad.deployment_watcher: failed to update deployment %q status: %v", w.d.ID, err)
			} else {
				w.setLatestEval(index)
			}
		} else if createEval {
			// Create an eval to push the deployment along
			w.createEvalBatched(index)
		}
	}
}

// latestStableJob returns the latest stable job. It may be nil if none exist.
func (w *deploymentWatcher) latestStableJob() (*structs.Job, error) {
	snap, err := w.state.Snapshot()
	if err != nil {
		return nil, err
	}

	versions, err := snap.JobVersionsByID(nil, w.d.Namespace, w.d.JobID)
	if err != nil {
		return nil, err
	}

	var stable *structs.Job
	for _, job := range versions {
		if job.Stable {
			stable = job
			break
		}
	}

	return stable, nil
}

// createEvalBatched creates an eval but batches calls together
func (w *deploymentWatcher) createEvalBatched(forIndex uint64) {
	w.l.Lock()
	defer w.l.Unlock()

	if w.outstandingBatch || forIndex < w.latestEval {
		return
	}

	w.outstandingBatch = true

	time.AfterFunc(perJobEvalBatchPeriod, func() {
		// If the timer has been created and then we shut down, we need to
		// no-op the evaluation creation.
		select {
		case <-w.ctx.Done():
			return
		default:
		}

		// Create the eval
		evalCreateIndex, err := w.createEvaluation(w.getEval())
		if err != nil {
			w.logger.Printf("[ERR] nomad.deployment_watcher: failed to create evaluation for deployment %q: %v", w.d.ID, err)
		} else {
			w.setLatestEval(evalCreateIndex)
		}

		w.l.Lock()
		w.outstandingBatch = false
		w.l.Unlock()
	})
}

// getEval returns an evaluation suitable for the deployment
func (w *deploymentWatcher) getEval() *structs.Evaluation {
	return &structs.Evaluation{
		ID:           uuid.Generate(),
		Namespace:    w.j.Namespace,
		Priority:     w.j.Priority,
		Type:         w.j.Type,
		TriggeredBy:  structs.EvalTriggerDeploymentWatcher,
		JobID:        w.j.ID,
		DeploymentID: w.d.ID,
		Status:       structs.EvalStatusPending,
	}
}

// getDeploymentStatusUpdate returns a deployment status update
func (w *deploymentWatcher) getDeploymentStatusUpdate(status, desc string) *structs.DeploymentStatusUpdate {
	return &structs.DeploymentStatusUpdate{
		DeploymentID:      w.d.ID,
		Status:            status,
		StatusDescription: desc,
	}
}

// getAllocs retrieves the allocations that are part of the deployment blocking
// at the given index.
func (w *deploymentWatcher) getAllocs(index uint64) ([]*structs.AllocListStub, uint64, error) {
	resp, index, err := w.state.BlockingQuery(w.getAllocsImpl, index, w.ctx)
	if err != nil {
		return nil, 0, err
	}
	if err := w.ctx.Err(); err != nil {
		return nil, 0, err
	}

	return resp.([]*structs.AllocListStub), index, nil
}

// getAllocsImpl retrieves the allocations for the watched deployment from the
// passed state store.
func (w *deploymentWatcher) getAllocsImpl(ws memdb.WatchSet, state *state.StateStore) (interface{}, uint64, error) {
	if err := w.queryLimiter.Wait(w.ctx); err != nil {
		return nil, 0, err
	}

	// Capture all the allocations
	allocs, err := state.AllocsByDeployment(ws, w.d.ID)
	if err != nil {
		return nil, 0, err
	}

	stubs := make([]*structs.AllocListStub, 0, len(allocs))
	for _, alloc := range allocs {
		stubs = append(stubs, alloc.Stub())
	}

	// Use the last index that affected the allocs table
	index, err := state.Index("allocs")
	if err != nil {
		return nil, index, err
	}

	return stubs, index, nil
}

// latestEvalIndex returns the index of the last evaluation created for
// the job. The index is used to determine if an allocation update requires an
// evaluation to be triggered.
func (w *deploymentWatcher) latestEvalIndex() (uint64, error) {
	if err := w.queryLimiter.Wait(w.ctx); err != nil {
		return 0, err
	}

	snap, err := w.state.Snapshot()
	if err != nil {
		return 0, err
	}

	evals, err := snap.EvalsByJob(nil, w.d.Namespace, w.d.JobID)
	if err != nil {
		return 0, err
	}

	if len(evals) == 0 {
		idx, err := snap.Index("evals")
		if err == nil {
			w.setLatestEval(idx)
		}
		return idx, err
	}

	// Prefer using the snapshot index. Otherwise use the create index
	e := evals[0]
	if e.SnapshotIndex != 0 {
		w.setLatestEval(e.SnapshotIndex)
		return e.SnapshotIndex, nil
	}

	w.setLatestEval(e.CreateIndex)
	return e.CreateIndex, nil
}

// setLatestEval sets the given index as the latest eval unless the currently
// stored index is higher.
func (w *deploymentWatcher) setLatestEval(index uint64) {
	w.l.Lock()
	defer w.l.Unlock()
	if index > w.latestEval {
		w.latestEval = index
	}
}

// getLatestEval returns the latest eval index.
func (w *deploymentWatcher) getLatestEval() uint64 {
	w.l.Lock()
	defer w.l.Unlock()
	return w.latestEval
}