2015-08-11 21:27:14 +00:00
|
|
|
package state
|
2015-06-01 15:49:10 +00:00
|
|
|
|
|
|
|
import (
|
2015-07-03 21:46:30 +00:00
|
|
|
"fmt"
|
2015-06-01 15:49:10 +00:00
|
|
|
"io"
|
|
|
|
"log"
|
2015-06-03 09:21:59 +00:00
|
|
|
|
2015-07-03 21:46:30 +00:00
|
|
|
"github.com/hashicorp/go-memdb"
|
2015-07-04 01:19:43 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
2015-06-01 15:49:10 +00:00
|
|
|
)
|
|
|
|
|
2015-10-29 01:11:55 +00:00
|
|
|
// IndexEntry is used with the "index" table
// for managing the latest Raft index affecting a table.
type IndexEntry struct {
	// Key is the name of the table this entry tracks.
	Key string
	// Value is the latest Raft index that modified the table.
	Value uint64
}
|
|
|
|
|
2015-07-04 00:50:54 +00:00
|
|
|
// The StateStore is responsible for maintaining all the Nomad
// state. It is manipulated by the FSM which maintains consistency
// through the use of Raft. The goals of the StateStore are to provide
// high concurrency for read operations without blocking writes, and
// to provide write availability in the face of reads. EVERY object
// returned as a result of a read against the state store should be
// considered a constant and NEVER modified in place.
type StateStore struct {
	// logger receives diagnostic output; configured in NewStateStore.
	logger *log.Logger

	// db is the underlying go-memdb instance backing every table.
	db *memdb.MemDB

	// abandonCh is used to signal watchers that this state store has been
	// abandoned (usually during a restore). This is only ever closed.
	abandonCh chan struct{}
}
|
|
|
|
|
|
|
|
// NewStateStore is used to create a new state store
|
2015-08-06 18:09:59 +00:00
|
|
|
func NewStateStore(logOutput io.Writer) (*StateStore, error) {
|
2015-07-03 21:46:30 +00:00
|
|
|
// Create the MemDB
|
|
|
|
db, err := memdb.NewMemDB(stateStoreSchema())
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("state store setup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the state store
|
2015-06-01 15:49:10 +00:00
|
|
|
s := &StateStore{
|
2017-02-05 20:03:11 +00:00
|
|
|
logger: log.New(logOutput, "", log.LstdFlags),
|
|
|
|
db: db,
|
|
|
|
abandonCh: make(chan struct{}),
|
2015-06-01 15:49:10 +00:00
|
|
|
}
|
|
|
|
return s, nil
|
|
|
|
}
|
|
|
|
|
2015-06-03 09:21:59 +00:00
|
|
|
// Snapshot is used to create a point in time snapshot. Because
|
2015-07-03 21:46:30 +00:00
|
|
|
// we use MemDB, we just need to snapshot the state of the underlying
|
|
|
|
// database.
|
2015-06-01 15:49:10 +00:00
|
|
|
func (s *StateStore) Snapshot() (*StateSnapshot, error) {
|
2015-06-03 09:21:59 +00:00
|
|
|
snap := &StateSnapshot{
|
2015-06-03 09:26:49 +00:00
|
|
|
StateStore: StateStore{
|
2015-08-06 18:09:59 +00:00
|
|
|
logger: s.logger,
|
|
|
|
db: s.db.Snapshot(),
|
2015-06-03 09:21:59 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
return snap, nil
|
2015-06-01 15:49:10 +00:00
|
|
|
}
|
2015-07-04 01:19:43 +00:00
|
|
|
|
2015-07-04 17:16:52 +00:00
|
|
|
// Restore is used to optimize the efficiency of rebuilding
|
|
|
|
// state by minimizing the number of transactions and checking
|
|
|
|
// overhead.
|
|
|
|
func (s *StateStore) Restore() (*StateRestore, error) {
|
|
|
|
txn := s.db.Txn(true)
|
2015-08-23 01:57:15 +00:00
|
|
|
r := &StateRestore{
|
2017-02-05 20:45:57 +00:00
|
|
|
txn: txn,
|
2015-08-23 01:57:15 +00:00
|
|
|
}
|
|
|
|
return r, nil
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:03:11 +00:00
|
|
|
// AbandonCh returns a channel you can wait on to know if the state store was
// abandoned.
func (s *StateStore) AbandonCh() <-chan struct{} {
	// The channel is only ever closed (see Abandon), never sent on.
	return s.abandonCh
}
|
|
|
|
|
|
|
|
// Abandon is used to signal that the given state store has been abandoned.
// Calling this more than one time will panic.
func (s *StateStore) Abandon() {
	// Closing broadcasts to every AbandonCh watcher at once; a second
	// close of the same channel panics, hence the contract above.
	close(s.abandonCh)
}
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// UpsertPlanResults is used to upsert the results of a plan.
|
|
|
|
func (s *StateStore) UpsertPlanResults(index uint64, results *structs.ApplyPlanResultsRequest) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Upsert the newly created deployment
|
|
|
|
if results.CreatedDeployment != nil {
|
|
|
|
if err := s.upsertDeploymentImpl(index, results.CreatedDeployment, true, txn); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-11 19:49:04 +00:00
|
|
|
// Update the status of deployments effected by the plan.
|
|
|
|
if len(results.DeploymentUpdates) != 0 {
|
|
|
|
s.upsertDeploymentUpdates(index, results.DeploymentUpdates, txn)
|
|
|
|
}
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// Attach the job to all the allocations. It is pulled out in the payload to
|
|
|
|
// avoid the redundancy of encoding, but should be denormalized prior to
|
|
|
|
// being inserted into MemDB.
|
|
|
|
structs.DenormalizeAllocationJobs(results.Job, results.Alloc)
|
|
|
|
|
|
|
|
// Calculate the total resources of allocations. It is pulled out in the
|
|
|
|
// payload to avoid encoding something that can be computed, but should be
|
|
|
|
// denormalized prior to being inserted into MemDB.
|
|
|
|
for _, alloc := range results.Alloc {
|
|
|
|
if alloc.Resources != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
alloc.Resources = new(structs.Resources)
|
|
|
|
for _, task := range alloc.TaskResources {
|
|
|
|
alloc.Resources.Add(task)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the shared resources
|
|
|
|
alloc.Resources.Add(alloc.SharedResources)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Upsert the allocations
|
|
|
|
if err := s.upsertAllocsImpl(index, results.Alloc, txn); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-11 19:49:04 +00:00
|
|
|
// upsertDeploymentUpdates updates the deployments given the passed status
|
|
|
|
// updates.
|
|
|
|
func (s *StateStore) upsertDeploymentUpdates(index uint64, updates []*structs.DeploymentStatusUpdate, txn *memdb.Txn) error {
|
|
|
|
for _, d := range updates {
|
|
|
|
raw, err := txn.First("deployment", "id", d.DeploymentID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if raw == nil {
|
|
|
|
return fmt.Errorf("Deployment ID %q couldn't be updated as it does not exist", d.DeploymentID)
|
|
|
|
}
|
|
|
|
|
|
|
|
copy := raw.(*structs.Deployment).Copy()
|
|
|
|
|
|
|
|
// Apply the new status
|
|
|
|
copy.Status = d.Status
|
|
|
|
copy.StatusDescription = d.StatusDescription
|
|
|
|
copy.ModifyIndex = index
|
|
|
|
|
|
|
|
// Insert the deployment
|
|
|
|
if err := txn.Insert("deployment", copy); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"deployment", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-03 23:08:30 +00:00
|
|
|
// UpsertJobSummary upserts a job summary into the state store.
|
2016-07-20 21:09:03 +00:00
|
|
|
func (s *StateStore) UpsertJobSummary(index uint64, jobSummary *structs.JobSummary) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// Check if the job summary already exists
|
2017-05-10 22:26:00 +00:00
|
|
|
existing, err := txn.First("job_summary", "id", jobSummary.JobID)
|
2017-05-05 20:52:01 +00:00
|
|
|
if err != nil {
|
2017-05-10 22:26:00 +00:00
|
|
|
return fmt.Errorf("job summary lookup failed: %v", err)
|
2017-05-05 20:52:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Setup the indexes correctly
|
|
|
|
if existing != nil {
|
|
|
|
jobSummary.CreateIndex = existing.(*structs.JobSummary).CreateIndex
|
|
|
|
jobSummary.ModifyIndex = index
|
|
|
|
} else {
|
|
|
|
jobSummary.CreateIndex = index
|
|
|
|
jobSummary.ModifyIndex = index
|
|
|
|
}
|
|
|
|
|
2016-08-03 23:08:30 +00:00
|
|
|
// Update the index
|
2017-01-06 18:34:55 +00:00
|
|
|
if err := txn.Insert("job_summary", jobSummary); err != nil {
|
2016-07-20 21:09:03 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-08-03 23:08:30 +00:00
|
|
|
|
|
|
|
// Update the indexes table for job summary
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-07-20 21:09:03 +00:00
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-07-26 00:26:10 +00:00
|
|
|
// DeleteJobSummary deletes the job summary with the given ID. This is for
|
|
|
|
// testing purposes only.
|
|
|
|
func (s *StateStore) DeleteJobSummary(index uint64, id string) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Delete the job summary
|
|
|
|
if _, err := txn.DeleteAll("job_summary", "id", id); err != nil {
|
|
|
|
return fmt.Errorf("deleting job summary failed: %v", err)
|
|
|
|
}
|
2016-08-03 23:08:30 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2016-07-26 00:26:10 +00:00
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-10 22:26:00 +00:00
|
|
|
// UpsertDeployment is used to insert a new deployment. If cancelPrior is set to
|
|
|
|
// true, all prior deployments for the same job will be cancelled.
|
2017-04-24 21:49:23 +00:00
|
|
|
func (s *StateStore) UpsertDeployment(index uint64, deployment *structs.Deployment, cancelPrior bool) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
2017-05-05 20:52:01 +00:00
|
|
|
if err := s.upsertDeploymentImpl(index, deployment, cancelPrior, txn); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
2017-04-24 21:49:23 +00:00
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Deployment, cancelPrior bool, txn *memdb.Txn) error {
|
2017-04-24 21:49:23 +00:00
|
|
|
// Go through and cancel any active deployment for the job.
|
|
|
|
if cancelPrior {
|
2017-05-10 22:26:00 +00:00
|
|
|
s.cancelPriorDeployments(index, deployment, txn)
|
2017-04-24 21:49:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// Check if the deployment already exists
|
|
|
|
existing, err := txn.First("deployment", "id", deployment.ID)
|
|
|
|
if err != nil {
|
2017-05-10 22:26:00 +00:00
|
|
|
return fmt.Errorf("deployment lookup failed: %v", err)
|
2017-05-05 20:52:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Setup the indexes correctly
|
|
|
|
if existing != nil {
|
|
|
|
deployment.CreateIndex = existing.(*structs.Deployment).CreateIndex
|
|
|
|
deployment.ModifyIndex = index
|
|
|
|
} else {
|
|
|
|
deployment.CreateIndex = index
|
|
|
|
deployment.ModifyIndex = index
|
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
// Insert the deployment
|
|
|
|
if err := txn.Insert("deployment", deployment); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the indexes table for deployment
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"deployment", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-10 22:26:00 +00:00
|
|
|
// cancelPriorDeployments cancels any prior deployments for the job.
|
|
|
|
func (s *StateStore) cancelPriorDeployments(index uint64, deployment *structs.Deployment, txn *memdb.Txn) error {
|
|
|
|
iter, err := txn.Get("deployment", "job", deployment.JobID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("deployment lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the deployment is active
|
|
|
|
d := raw.(*structs.Deployment)
|
|
|
|
if !d.Active() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to cancel so make a copy and set its status
|
|
|
|
cancelled := d.Copy()
|
|
|
|
cancelled.ModifyIndex = index
|
|
|
|
cancelled.Status = structs.DeploymentStatusCancelled
|
|
|
|
cancelled.StatusDescription = fmt.Sprintf("Cancelled in favor of deployment %q", deployment.ID)
|
|
|
|
|
|
|
|
// Insert the cancelled deployment
|
|
|
|
if err := txn.Insert("deployment", cancelled); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
func (s *StateStore) Deployments(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire deployments table
|
|
|
|
iter, err := txn.Get("deployment", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *StateStore) DeploymentByID(ws memdb.WatchSet, deploymentID string) (*structs.Deployment, error) {
|
|
|
|
txn := s.db.Txn(false)
|
2017-05-05 20:52:01 +00:00
|
|
|
return s.deploymentByIDImpl(ws, deploymentID, txn)
|
|
|
|
}
|
2017-04-24 21:49:23 +00:00
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
func (s *StateStore) deploymentByIDImpl(ws memdb.WatchSet, deploymentID string, txn *memdb.Txn) (*structs.Deployment, error) {
|
2017-04-24 21:49:23 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("deployment", "id", deploymentID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
ws.Add(watchCh)
|
|
|
|
|
|
|
|
if existing != nil {
|
|
|
|
return existing.(*structs.Deployment), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
func (s *StateStore) DeploymentsByJobID(ws memdb.WatchSet, jobID string) ([]*structs.Deployment, error) {
|
2017-04-24 21:49:23 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Get an iterator over the deployments
|
2017-05-10 22:26:00 +00:00
|
|
|
iter, err := txn.Get("deployment", "job", jobID)
|
2017-04-24 21:49:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
|
|
|
var out []*structs.Deployment
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
d := raw.(*structs.Deployment)
|
|
|
|
out = append(out, d)
|
|
|
|
}
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2017-05-10 22:26:00 +00:00
|
|
|
// LatestDeploymentByJobID returns the latest deployment for the given job. The
|
|
|
|
// latest is determined strictly by CreateIndex.
|
2017-05-05 20:52:01 +00:00
|
|
|
func (s *StateStore) LatestDeploymentByJobID(ws memdb.WatchSet, jobID string) (*structs.Deployment, error) {
|
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Get an iterator over the deployments
|
2017-05-10 22:26:00 +00:00
|
|
|
iter, err := txn.Get("deployment", "job", jobID)
|
2017-05-05 20:52:01 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
|
|
|
var out *structs.Deployment
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
d := raw.(*structs.Deployment)
|
|
|
|
if out == nil || out.CreateIndex < d.CreateIndex {
|
|
|
|
out = d
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
// DeleteDeployment is used to delete a deployment by ID
|
|
|
|
func (s *StateStore) DeleteDeployment(index uint64, deploymentID string) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the deployment
|
|
|
|
existing, err := txn.First("deployment", "id", deploymentID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("deployment lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
return fmt.Errorf("deployment not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete the deployment
|
|
|
|
if err := txn.Delete("deployment", existing); err != nil {
|
|
|
|
return fmt.Errorf("deployment delete failed: %v", err)
|
|
|
|
}
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"deployment", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:39:06 +00:00
|
|
|
// UpsertNode is used to register a node or update a node definition
|
2015-09-07 02:51:50 +00:00
|
|
|
// This is assumed to be triggered by the client, so we retain the value
|
|
|
|
// of drain which is set by the scheduler.
|
2015-09-07 03:39:06 +00:00
|
|
|
func (s *StateStore) UpsertNode(index uint64, node *structs.Node) error {
|
2015-07-04 01:19:43 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Check if the node already exists
|
|
|
|
existing, err := txn.First("nodes", "id", node.ID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup the indexes correctly
|
|
|
|
if existing != nil {
|
2015-09-07 02:51:50 +00:00
|
|
|
exist := existing.(*structs.Node)
|
|
|
|
node.CreateIndex = exist.CreateIndex
|
2015-07-04 01:19:43 +00:00
|
|
|
node.ModifyIndex = index
|
2015-09-07 02:51:50 +00:00
|
|
|
node.Drain = exist.Drain // Retain the drain mode
|
2015-07-04 01:19:43 +00:00
|
|
|
} else {
|
|
|
|
node.CreateIndex = index
|
|
|
|
node.ModifyIndex = index
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert the node
|
|
|
|
if err := txn.Insert("nodes", node); err != nil {
|
|
|
|
return fmt.Errorf("node insert failed: %v", err)
|
|
|
|
}
|
2015-07-06 21:30:43 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2015-07-04 01:19:43 +00:00
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:39:06 +00:00
|
|
|
// DeleteNode is used to deregister a node
|
|
|
|
func (s *StateStore) DeleteNode(index uint64, nodeID string) error {
|
2015-07-04 01:19:43 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the node
|
|
|
|
existing, err := txn.First("nodes", "id", nodeID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
return fmt.Errorf("node not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete the node
|
|
|
|
if err := txn.Delete("nodes", existing); err != nil {
|
|
|
|
return fmt.Errorf("node delete failed: %v", err)
|
|
|
|
}
|
2015-07-06 21:30:43 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2015-07-04 01:19:43 +00:00
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateNodeStatus is used to update the status of a node
|
2015-09-07 02:51:50 +00:00
|
|
|
func (s *StateStore) UpdateNodeStatus(index uint64, nodeID, status string) error {
|
2015-07-04 01:19:43 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the node
|
|
|
|
existing, err := txn.First("nodes", "id", nodeID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
return fmt.Errorf("node not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy the existing node
|
|
|
|
existingNode := existing.(*structs.Node)
|
|
|
|
copyNode := new(structs.Node)
|
|
|
|
*copyNode = *existingNode
|
|
|
|
|
|
|
|
// Update the status in the copy
|
|
|
|
copyNode.Status = status
|
|
|
|
copyNode.ModifyIndex = index
|
|
|
|
|
|
|
|
// Insert the node
|
|
|
|
if err := txn.Insert("nodes", copyNode); err != nil {
|
|
|
|
return fmt.Errorf("node update failed: %v", err)
|
|
|
|
}
|
2015-07-06 21:30:43 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2015-07-04 01:19:43 +00:00
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 02:51:50 +00:00
|
|
|
// UpdateNodeDrain is used to update the drain of a node
|
|
|
|
func (s *StateStore) UpdateNodeDrain(index uint64, nodeID string, drain bool) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the node
|
|
|
|
existing, err := txn.First("nodes", "id", nodeID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
return fmt.Errorf("node not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy the existing node
|
|
|
|
existingNode := existing.(*structs.Node)
|
|
|
|
copyNode := new(structs.Node)
|
|
|
|
*copyNode = *existingNode
|
|
|
|
|
|
|
|
// Update the drain in the copy
|
|
|
|
copyNode.Drain = drain
|
|
|
|
copyNode.ModifyIndex = index
|
|
|
|
|
|
|
|
// Insert the node
|
|
|
|
if err := txn.Insert("nodes", copyNode); err != nil {
|
|
|
|
return fmt.Errorf("node update failed: %v", err)
|
|
|
|
}
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:51:01 +00:00
|
|
|
// NodeByID is used to lookup a node by ID
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) NodeByID(ws memdb.WatchSet, nodeID string) (*structs.Node, error) {
|
2015-07-04 01:19:43 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("nodes", "id", nodeID)
|
2015-07-04 01:19:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(watchCh)
|
2015-07-04 01:19:43 +00:00
|
|
|
|
|
|
|
if existing != nil {
|
2015-12-19 20:05:17 +00:00
|
|
|
return existing.(*structs.Node), nil
|
2015-07-04 01:19:43 +00:00
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
2015-07-04 17:16:52 +00:00
|
|
|
|
2015-12-22 22:44:33 +00:00
|
|
|
// NodesByIDPrefix is used to lookup nodes by prefix
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) NodesByIDPrefix(ws memdb.WatchSet, nodeID string) (memdb.ResultIterator, error) {
|
2015-12-19 20:05:17 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("nodes", "id_prefix", nodeID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("node lookup failed: %v", err)
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
2015-12-19 20:05:17 +00:00
|
|
|
|
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-07-04 17:16:52 +00:00
|
|
|
// Nodes returns an iterator over all the nodes
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) Nodes(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2015-07-04 17:16:52 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire nodes table
|
|
|
|
iter, err := txn.Get("nodes", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
2015-07-04 17:16:52 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:39:06 +00:00
|
|
|
// UpsertJob is used to register a job or update a job definition
func (s *StateStore) UpsertJob(index uint64, job *structs.Job) error {
	txn := s.db.Txn(true)
	defer txn.Abort()

	// Check if the job already exists
	existing, err := txn.First("jobs", "id", job.ID)
	if err != nil {
		return fmt.Errorf("job lookup failed: %v", err)
	}

	// Setup the indexes correctly
	if existing != nil {
		// Updates carry forward the create index and advance the version.
		job.CreateIndex = existing.(*structs.Job).CreateIndex
		job.ModifyIndex = index
		job.JobModifyIndex = index
		job.Version = existing.(*structs.Job).Version + 1

		// Compute the job status
		var err error
		job.Status, err = s.getJobStatus(txn, job, false)
		if err != nil {
			return fmt.Errorf("setting job status for %q failed: %v", job.ID, err)
		}
	} else {
		job.CreateIndex = index
		job.ModifyIndex = index
		job.JobModifyIndex = index
		job.Version = 0

		// NOTE(review): setJobStatus presumably writes the job (and possibly
		// parent summaries) into the transaction — that is why the job is
		// re-read below; confirm against the helper's definition.
		if err := s.setJobStatus(index, txn, job, false, ""); err != nil {
			return fmt.Errorf("setting job status for %q failed: %v", job.ID, err)
		}

		// Have to get the job again since it could have been updated
		updated, err := txn.First("jobs", "id", job.ID)
		if err != nil {
			return fmt.Errorf("job lookup failed: %v", err)
		}
		if updated != nil {
			job = updated.(*structs.Job)
		}
	}

	// Keep the job summary table in sync with this job.
	if err := s.updateSummaryWithJob(index, job, txn); err != nil {
		return fmt.Errorf("unable to create job summary: %v", err)
	}

	// Record this version of the job in the historic job_version table.
	if err := s.upsertJobVersion(index, job, txn); err != nil {
		return fmt.Errorf("unable to upsert job into job_version table: %v", err)
	}

	// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
	// COMPAT 0.4.1 -> 0.5
	s.addEphemeralDiskToTaskGroups(job)

	// Insert the job
	if err := txn.Insert("jobs", job); err != nil {
		return fmt.Errorf("job insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	txn.Commit()
	return nil
}
|
|
|
|
|
2015-09-07 03:39:06 +00:00
|
|
|
// DeleteJob is used to deregister a job
|
|
|
|
func (s *StateStore) DeleteJob(index uint64, jobID string) error {
|
2015-07-07 16:41:05 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the node
|
2015-07-23 22:15:48 +00:00
|
|
|
existing, err := txn.First("jobs", "id", jobID)
|
2015-07-07 16:41:05 +00:00
|
|
|
if err != nil {
|
2015-07-23 22:15:48 +00:00
|
|
|
return fmt.Errorf("job lookup failed: %v", err)
|
2015-07-07 16:41:05 +00:00
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
return fmt.Errorf("job not found")
|
|
|
|
}
|
|
|
|
|
2016-12-07 04:15:10 +00:00
|
|
|
// Check if we should update a parent job summary
|
|
|
|
job := existing.(*structs.Job)
|
|
|
|
if job.ParentID != "" {
|
|
|
|
summaryRaw, err := txn.First("job_summary", "id", job.ParentID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to retrieve summary for parent job: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only continue if the summary exists. It could not exist if the parent
|
|
|
|
// job was removed
|
|
|
|
if summaryRaw != nil {
|
2017-01-06 18:34:55 +00:00
|
|
|
existing := summaryRaw.(*structs.JobSummary)
|
2016-12-07 04:15:10 +00:00
|
|
|
pSummary := existing.Copy()
|
|
|
|
if pSummary.Children != nil {
|
|
|
|
|
2017-01-11 21:18:36 +00:00
|
|
|
modified := false
|
2016-12-07 04:15:10 +00:00
|
|
|
switch job.Status {
|
|
|
|
case structs.JobStatusPending:
|
|
|
|
pSummary.Children.Pending--
|
|
|
|
pSummary.Children.Dead++
|
2017-01-11 21:18:36 +00:00
|
|
|
modified = true
|
2016-12-07 04:15:10 +00:00
|
|
|
case structs.JobStatusRunning:
|
|
|
|
pSummary.Children.Running--
|
|
|
|
pSummary.Children.Dead++
|
2017-01-11 21:18:36 +00:00
|
|
|
modified = true
|
2016-12-07 04:15:10 +00:00
|
|
|
case structs.JobStatusDead:
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("unknown old job status %q", job.Status)
|
|
|
|
}
|
|
|
|
|
2017-01-11 21:18:36 +00:00
|
|
|
if modified {
|
|
|
|
// Update the modify index
|
|
|
|
pSummary.ModifyIndex = index
|
2016-12-07 04:15:10 +00:00
|
|
|
|
2017-01-11 21:18:36 +00:00
|
|
|
// Insert the summary
|
|
|
|
if err := txn.Insert("job_summary", pSummary); err != nil {
|
|
|
|
return fmt.Errorf("job summary insert failed: %v", err)
|
|
|
|
}
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2016-12-07 04:15:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete the job
|
2015-07-07 16:41:05 +00:00
|
|
|
if err := txn.Delete("jobs", existing); err != nil {
|
|
|
|
return fmt.Errorf("job delete failed: %v", err)
|
|
|
|
}
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-04-15 03:54:30 +00:00
|
|
|
// Delete the job versions
|
|
|
|
if err := s.deleteJobVersions(index, job, txn); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-07-01 07:17:35 +00:00
|
|
|
// Delete the job summary
|
2016-07-12 22:00:35 +00:00
|
|
|
if _, err = txn.DeleteAll("job_summary", "id", jobID); err != nil {
|
2016-07-03 03:04:02 +00:00
|
|
|
return fmt.Errorf("deleing job summary failed: %v", err)
|
2016-07-01 07:17:35 +00:00
|
|
|
}
|
2016-07-18 23:51:47 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2016-07-01 07:17:35 +00:00
|
|
|
|
2015-07-07 16:41:05 +00:00
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-15 03:54:30 +00:00
|
|
|
// deleteJobVersions deletes all versions of the given job.
|
|
|
|
func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *memdb.Txn) error {
|
2017-04-24 21:49:23 +00:00
|
|
|
iter, err := txn.Get("job_version", "id_prefix", job.ID)
|
2017-04-15 03:54:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the ID is an exact match
|
|
|
|
j := raw.(*structs.Job)
|
|
|
|
if j.ID != job.ID {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
if _, err = txn.DeleteAll("job_version", "id", job.ID, job.Version); err != nil {
|
2017-04-19 17:54:03 +00:00
|
|
|
return fmt.Errorf("deleting job versions failed: %v", err)
|
2017-04-15 03:54:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-13 21:54:22 +00:00
|
|
|
// upsertJobVersion inserts a job into its historic version table and limits the
|
|
|
|
// number of job versions that are tracked.
|
|
|
|
func (s *StateStore) upsertJobVersion(index uint64, job *structs.Job, txn *memdb.Txn) error {
|
2017-04-13 20:54:57 +00:00
|
|
|
// Insert the job
|
2017-04-24 21:49:23 +00:00
|
|
|
if err := txn.Insert("job_version", job); err != nil {
|
|
|
|
return fmt.Errorf("failed to insert job into job_version table: %v", err)
|
2017-04-13 20:54:57 +00:00
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_version", index}); err != nil {
|
2017-04-13 20:54:57 +00:00
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get all the historic jobs for this ID
|
2017-04-13 21:54:22 +00:00
|
|
|
all, err := s.jobVersionByID(txn, nil, job.ID)
|
2017-04-13 20:54:57 +00:00
|
|
|
if err != nil {
|
2017-04-13 21:54:22 +00:00
|
|
|
return fmt.Errorf("failed to look up job versions for %q: %v", job.ID, err)
|
2017-04-13 20:54:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we are below the limit there is no GCing to be done
|
2017-04-13 21:54:22 +00:00
|
|
|
if len(all) <= structs.JobTrackedVersions {
|
2017-04-13 20:54:57 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// We have to delete a historic job to make room.
|
|
|
|
// Find index of the highest versioned stable job
|
|
|
|
stableIdx := -1
|
|
|
|
for i, j := range all {
|
|
|
|
if j.Stable {
|
|
|
|
stableIdx = i
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the stable job is the oldest version, do a swap to bring it into the
|
|
|
|
// keep set.
|
2017-04-13 21:54:22 +00:00
|
|
|
max := structs.JobTrackedVersions
|
2017-04-13 20:54:57 +00:00
|
|
|
if stableIdx == max {
|
|
|
|
all[max-1], all[max] = all[max], all[max-1]
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete the job outside of the set that are being kept.
|
|
|
|
d := all[max]
|
2017-04-24 21:49:23 +00:00
|
|
|
if err := txn.Delete("job_version", d); err != nil {
|
|
|
|
return fmt.Errorf("failed to delete job %v (%d) from job_version", d.ID, d.Version)
|
2017-04-13 20:54:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-19 23:44:38 +00:00
|
|
|
// JobByID is used to lookup a job by its ID. JobByID returns the current/latest job
|
|
|
|
// version.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobByID(ws memdb.WatchSet, id string) (*structs.Job, error) {
|
2015-07-07 16:41:05 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("jobs", "id", id)
|
2015-07-07 16:41:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("job lookup failed: %v", err)
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(watchCh)
|
2015-07-07 16:41:05 +00:00
|
|
|
|
|
|
|
if existing != nil {
|
2015-12-19 20:05:17 +00:00
|
|
|
return existing.(*structs.Job), nil
|
2015-07-07 16:41:05 +00:00
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2015-12-22 22:44:33 +00:00
|
|
|
// JobsByIDPrefix is used to lookup a job by prefix
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
|
2015-12-19 20:05:17 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("jobs", "id_prefix", id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("job lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-12-19 20:05:17 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2017-04-13 21:54:22 +00:00
|
|
|
// JobVersionsByID returns all the tracked versions of a job.
|
|
|
|
func (s *StateStore) JobVersionsByID(ws memdb.WatchSet, id string) ([]*structs.Job, error) {
|
2017-04-13 20:54:57 +00:00
|
|
|
txn := s.db.Txn(false)
|
2017-04-13 21:54:22 +00:00
|
|
|
return s.jobVersionByID(txn, &ws, id)
|
2017-04-13 20:54:57 +00:00
|
|
|
}
|
|
|
|
|
2017-04-13 21:54:22 +00:00
|
|
|
// jobVersionByID is the underlying implementation for retrieving all tracked
|
2017-04-13 20:54:57 +00:00
|
|
|
// versions of a job and is called under an existing transaction. A watch set
|
|
|
|
// can optionally be passed in to add the job histories to the watch set.
|
2017-04-13 21:54:22 +00:00
|
|
|
func (s *StateStore) jobVersionByID(txn *memdb.Txn, ws *memdb.WatchSet, id string) ([]*structs.Job, error) {
|
2017-04-13 20:54:57 +00:00
|
|
|
// Get all the historic jobs for this ID
|
2017-04-24 21:49:23 +00:00
|
|
|
iter, err := txn.Get("job_version", "id_prefix", id)
|
2017-04-13 20:54:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if ws != nil {
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
}
|
|
|
|
|
|
|
|
var all []*structs.Job
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the ID is an exact match
|
|
|
|
j := raw.(*structs.Job)
|
|
|
|
if j.ID != id {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
all = append(all, j)
|
|
|
|
}
|
|
|
|
|
2017-04-19 17:54:03 +00:00
|
|
|
// Reverse so that highest versions first
|
|
|
|
for i, j := 0, len(all)-1; i < j; i, j = i+1, j-1 {
|
|
|
|
all[i], all[j] = all[j], all[i]
|
|
|
|
}
|
2017-04-13 20:54:57 +00:00
|
|
|
|
|
|
|
return all, nil
|
|
|
|
}
|
|
|
|
|
2017-04-18 22:11:33 +00:00
|
|
|
// JobByIDAndVersion returns the job identified by its ID and Version
|
|
|
|
func (s *StateStore) JobByIDAndVersion(ws memdb.WatchSet, id string, version uint64) (*structs.Job, error) {
|
|
|
|
txn := s.db.Txn(false)
|
2017-04-24 21:49:23 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("job_version", "id", id, version)
|
2017-04-18 22:11:33 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ws.Add(watchCh)
|
|
|
|
|
|
|
|
if existing != nil {
|
|
|
|
job := existing.(*structs.Job)
|
|
|
|
return job, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
// JobVersions returns an iterator over all tracked versions of all jobs.
func (s *StateStore) JobVersions(ws memdb.WatchSet) (memdb.ResultIterator, error) {
	txn := s.db.Txn(false)

	// Walk the entire job_version table
	iter, err := txn.Get("job_version", "id")
	if err != nil {
		return nil, err
	}

	ws.Add(iter.WatchCh())
	return iter, nil
}
|
|
|
|
|
2015-07-07 16:41:05 +00:00
|
|
|
// Jobs returns an iterator over all the jobs
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) Jobs(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2015-07-07 16:41:05 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire jobs table
|
|
|
|
iter, err := txn.Get("jobs", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-07-07 16:41:05 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-12-04 17:49:42 +00:00
|
|
|
// JobsByPeriodic returns an iterator over all the periodic or non-periodic jobs.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobsByPeriodic(ws memdb.WatchSet, periodic bool) (memdb.ResultIterator, error) {
|
2015-12-04 17:49:42 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("jobs", "periodic", periodic)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-12-04 17:49:42 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-10-20 17:57:53 +00:00
|
|
|
// JobsByScheduler returns an iterator over all the jobs with the specific
|
|
|
|
// scheduler type.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobsByScheduler(ws memdb.WatchSet, schedulerType string) (memdb.ResultIterator, error) {
|
2015-10-20 17:57:53 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Return an iterator for jobs with the specific type.
|
|
|
|
iter, err := txn.Get("jobs", "type", schedulerType)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-10-20 17:57:53 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-12-15 03:20:57 +00:00
|
|
|
// JobsByGC returns an iterator over all jobs eligible or uneligible for garbage
|
|
|
|
// collection.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobsByGC(ws memdb.WatchSet, gc bool) (memdb.ResultIterator, error) {
|
2015-12-15 03:20:57 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("jobs", "gc", gc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-12-15 03:20:57 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2016-07-03 03:04:02 +00:00
|
|
|
// JobSummary returns a job summary object which matches a specific id.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobSummaryByID(ws memdb.WatchSet, jobID string) (*structs.JobSummary, error) {
|
2016-06-30 19:04:22 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("job_summary", "id", jobID)
|
2016-06-30 19:04:22 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(watchCh)
|
|
|
|
|
2016-06-30 19:04:22 +00:00
|
|
|
if existing != nil {
|
2017-01-06 18:34:55 +00:00
|
|
|
summary := existing.(*structs.JobSummary)
|
|
|
|
return summary, nil
|
2016-06-30 19:04:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2016-07-13 19:25:07 +00:00
|
|
|
// JobSummaries walks the entire job summary table and returns all the job
|
2016-07-05 18:50:44 +00:00
|
|
|
// summary objects
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobSummaries(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2016-07-05 18:50:44 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2016-07-12 22:00:35 +00:00
|
|
|
iter, err := txn.Get("job_summary", "id")
|
2016-07-05 18:50:44 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2016-07-05 18:50:44 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2016-07-11 18:23:10 +00:00
|
|
|
// JobSummaryByPrefix is used to look up Job Summary by id prefix
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
|
2016-07-11 18:23:10 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2016-07-12 22:00:35 +00:00
|
|
|
iter, err := txn.Get("job_summary", "id_prefix", id)
|
2016-07-11 18:23:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("eval lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2016-07-11 18:23:10 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-12-19 01:51:30 +00:00
|
|
|
// UpsertPeriodicLaunch is used to register a launch or update it.
|
|
|
|
func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.PeriodicLaunch) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Check if the job already exists
|
2015-12-16 21:46:09 +00:00
|
|
|
existing, err := txn.First("periodic_launch", "id", launch.ID)
|
|
|
|
if err != nil {
|
2015-12-19 01:51:30 +00:00
|
|
|
return fmt.Errorf("periodic launch lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-12-16 21:46:09 +00:00
|
|
|
// Setup the indexes correctly
|
|
|
|
if existing != nil {
|
|
|
|
launch.CreateIndex = existing.(*structs.PeriodicLaunch).CreateIndex
|
|
|
|
launch.ModifyIndex = index
|
|
|
|
} else {
|
|
|
|
launch.CreateIndex = index
|
|
|
|
launch.ModifyIndex = index
|
|
|
|
}
|
|
|
|
|
2015-12-19 01:51:30 +00:00
|
|
|
// Insert the job
|
|
|
|
if err := txn.Insert("periodic_launch", launch); err != nil {
|
|
|
|
return fmt.Errorf("launch insert failed: %v", err)
|
|
|
|
}
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"periodic_launch", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeletePeriodicLaunch is used to delete the periodic launch
|
|
|
|
func (s *StateStore) DeletePeriodicLaunch(index uint64, jobID string) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the launch
|
|
|
|
existing, err := txn.First("periodic_launch", "id", jobID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("launch lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
return fmt.Errorf("launch not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete the launch
|
|
|
|
if err := txn.Delete("periodic_launch", existing); err != nil {
|
|
|
|
return fmt.Errorf("launch delete failed: %v", err)
|
|
|
|
}
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"periodic_launch", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// PeriodicLaunchByID is used to lookup a periodic launch by the periodic job
|
|
|
|
// ID.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) PeriodicLaunchByID(ws memdb.WatchSet, id string) (*structs.PeriodicLaunch, error) {
|
2015-12-19 01:51:30 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("periodic_launch", "id", id)
|
2015-12-19 01:51:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("periodic launch lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(watchCh)
|
|
|
|
|
2015-12-19 01:51:30 +00:00
|
|
|
if existing != nil {
|
|
|
|
return existing.(*structs.PeriodicLaunch), nil
|
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2015-12-07 23:58:17 +00:00
|
|
|
// PeriodicLaunches returns an iterator over all the periodic launches
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) PeriodicLaunches(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2015-12-07 23:58:17 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire table
|
|
|
|
iter, err := txn.Get("periodic_launch", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-12-07 23:58:17 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2017-01-04 23:25:03 +00:00
|
|
|
// UpsertEvals is used to upsert a set of evaluations
|
2015-08-06 21:51:15 +00:00
|
|
|
func (s *StateStore) UpsertEvals(index uint64, evals []*structs.Evaluation) error {
|
2015-07-23 22:43:06 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Do a nested upsert
|
2016-01-09 02:22:59 +00:00
|
|
|
jobs := make(map[string]string, len(evals))
|
2015-08-06 21:51:15 +00:00
|
|
|
for _, eval := range evals {
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.nestedUpsertEval(txn, index, eval); err != nil {
|
2015-08-06 21:51:15 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-01-09 02:22:59 +00:00
|
|
|
|
|
|
|
jobs[eval.JobID] = ""
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the job's status
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.setJobStatuses(index, txn, jobs, false); err != nil {
|
2016-01-09 02:22:59 +00:00
|
|
|
return fmt.Errorf("setting job status failed: %v", err)
|
2015-07-23 22:43:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// nestedUpsertEval is used to nest an evaluation upsert within an existing
// transaction. Beyond inserting the eval itself, it propagates the eval's
// queued-allocation counts into the job summary and cancels any blocked
// evals for the job once an eval completes without failed task groups.
// It does not commit the transaction.
func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *structs.Evaluation) error {
	// Lookup the evaluation
	existing, err := txn.First("evals", "id", eval.ID)
	if err != nil {
		return fmt.Errorf("eval lookup failed: %v", err)
	}

	// Update the indexes: keep the original create index on re-upsert.
	if existing != nil {
		eval.CreateIndex = existing.(*structs.Evaluation).CreateIndex
		eval.ModifyIndex = index
	} else {
		eval.CreateIndex = index
		eval.ModifyIndex = index
	}

	// Update the job summary with the eval's queued allocation counts.
	summaryRaw, err := txn.First("job_summary", "id", eval.JobID)
	if err != nil {
		return fmt.Errorf("job summary lookup failed: %v", err)
	}
	if summaryRaw != nil {
		// Copy before mutating: objects read from the store must never be
		// modified in place.
		js := summaryRaw.(*structs.JobSummary).Copy()
		hasSummaryChanged := false
		for tg, num := range eval.QueuedAllocations {
			if summary, ok := js.Summary[tg]; ok {
				if summary.Queued != num {
					summary.Queued = num
					js.Summary[tg] = summary
					hasSummaryChanged = true
				}
			} else {
				// The eval references a task group the summary doesn't know;
				// log rather than fail the upsert.
				s.logger.Printf("[ERR] state_store: unable to update queued for job %q and task group %q", eval.JobID, tg)
			}
		}

		// Insert the job summary only if something actually changed, to
		// avoid needless index churn.
		if hasSummaryChanged {
			js.ModifyIndex = index
			if err := txn.Insert("job_summary", js); err != nil {
				return fmt.Errorf("job summary insert failed: %v", err)
			}
			if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
				return fmt.Errorf("index update failed: %v", err)
			}
		}
	}

	// Check if the job has any blocked evaluations and cancel them
	if eval.Status == structs.EvalStatusComplete && len(eval.FailedTGAllocs) == 0 {
		// Get the blocked evaluation for a job if it exists
		iter, err := txn.Get("evals", "job", eval.JobID, structs.EvalStatusBlocked)
		if err != nil {
			return fmt.Errorf("failed to get blocked evals for job %q: %v", eval.JobID, err)
		}

		// Materialize the results before mutating, since we re-insert into
		// the same table we are iterating.
		var blocked []*structs.Evaluation
		for {
			raw := iter.Next()
			if raw == nil {
				break
			}
			blocked = append(blocked, raw.(*structs.Evaluation))
		}

		// Go through and update the evals. NOTE: the loop variable shadows
		// the outer `eval` parameter intentionally; only copies are mutated.
		for _, eval := range blocked {
			newEval := eval.Copy()
			newEval.Status = structs.EvalStatusCancelled
			newEval.StatusDescription = fmt.Sprintf("evaluation %q successful", newEval.ID)
			newEval.ModifyIndex = index
			if err := txn.Insert("evals", newEval); err != nil {
				return fmt.Errorf("eval insert failed: %v", err)
			}
		}
	}

	// Insert the eval and bump the table index.
	if err := txn.Insert("evals", eval); err != nil {
		return fmt.Errorf("eval insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"evals", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}
	return nil
}
|
|
|
|
|
|
|
|
// DeleteEval is used to delete an evaluation
|
2015-08-15 22:39:29 +00:00
|
|
|
func (s *StateStore) DeleteEval(index uint64, evals []string, allocs []string) error {
|
2015-07-23 22:43:06 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
2016-01-09 02:22:59 +00:00
|
|
|
jobs := make(map[string]string, len(evals))
|
2015-08-15 22:39:29 +00:00
|
|
|
for _, eval := range evals {
|
|
|
|
existing, err := txn.First("evals", "id", eval)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("eval lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if err := txn.Delete("evals", existing); err != nil {
|
|
|
|
return fmt.Errorf("eval delete failed: %v", err)
|
|
|
|
}
|
2016-10-30 00:30:34 +00:00
|
|
|
jobID := existing.(*structs.Evaluation).JobID
|
|
|
|
jobs[jobID] = ""
|
2015-07-23 22:43:06 +00:00
|
|
|
}
|
|
|
|
|
2015-08-15 22:39:29 +00:00
|
|
|
for _, alloc := range allocs {
|
|
|
|
existing, err := txn.First("allocs", "id", alloc)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("alloc lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
if existing == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if err := txn.Delete("allocs", existing); err != nil {
|
|
|
|
return fmt.Errorf("alloc delete failed: %v", err)
|
|
|
|
}
|
2015-07-23 22:43:06 +00:00
|
|
|
}
|
2015-08-15 22:39:29 +00:00
|
|
|
|
|
|
|
// Update the indexes
|
2015-07-23 22:43:06 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"evals", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2015-08-15 22:39:29 +00:00
|
|
|
if err := txn.Insert("index", &IndexEntry{"allocs", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
2015-10-29 01:34:56 +00:00
|
|
|
|
2016-01-09 02:22:59 +00:00
|
|
|
// Set the job's status
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.setJobStatuses(index, txn, jobs, true); err != nil {
|
2016-01-09 02:22:59 +00:00
|
|
|
return fmt.Errorf("setting job status failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-07-23 22:43:06 +00:00
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:51:01 +00:00
|
|
|
// EvalByID is used to lookup an eval by its ID
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) EvalByID(ws memdb.WatchSet, id string) (*structs.Evaluation, error) {
|
2015-07-23 22:43:06 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("evals", "id", id)
|
2015-07-23 22:43:06 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("eval lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(watchCh)
|
|
|
|
|
2015-07-23 22:43:06 +00:00
|
|
|
if existing != nil {
|
2015-12-19 20:05:17 +00:00
|
|
|
return existing.(*structs.Evaluation), nil
|
2015-07-23 22:43:06 +00:00
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2015-12-22 22:44:33 +00:00
|
|
|
// EvalsByIDPrefix is used to lookup evaluations by prefix
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
|
2015-12-19 20:05:17 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("evals", "id_prefix", id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("eval lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-12-19 20:05:17 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-09-06 19:10:24 +00:00
|
|
|
// EvalsByJob returns all the evaluations by job id
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) EvalsByJob(ws memdb.WatchSet, jobID string) ([]*structs.Evaluation, error) {
|
2015-09-06 19:10:24 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Get an iterator over the node allocations
|
2017-01-04 23:25:03 +00:00
|
|
|
iter, err := txn.Get("evals", "job_prefix", jobID)
|
2015-09-06 19:10:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-09-06 19:10:24 +00:00
|
|
|
var out []*structs.Evaluation
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
2017-03-27 17:35:36 +00:00
|
|
|
|
|
|
|
e := raw.(*structs.Evaluation)
|
|
|
|
|
|
|
|
// Filter non-exact matches
|
|
|
|
if e.JobID != jobID {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
out = append(out, e)
|
2015-09-06 19:10:24 +00:00
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2015-07-23 22:43:06 +00:00
|
|
|
// Evals returns an iterator over all the evaluations
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) Evals(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2015-07-23 22:43:06 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire table
|
|
|
|
iter, err := txn.Get("evals", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-07-23 22:43:06 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2016-07-13 19:25:07 +00:00
|
|
|
// UpdateAllocsFromClient is used to update an allocation based on input
|
2015-08-26 00:54:45 +00:00
|
|
|
// from a client. While the schedulers are the authority on the allocation for
|
|
|
|
// most things, some updates are authoritative from the client. Specifically,
|
|
|
|
// the desired state comes from the schedulers, while the actual state comes
|
|
|
|
// from clients.
|
2016-02-22 01:49:46 +00:00
|
|
|
func (s *StateStore) UpdateAllocsFromClient(index uint64, allocs []*structs.Allocation) error {
|
2015-08-26 00:54:45 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
2016-02-22 01:49:46 +00:00
|
|
|
// Handle each of the updated allocations
|
|
|
|
for _, alloc := range allocs {
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.nestedUpdateAllocFromClient(txn, index, alloc); err != nil {
|
2016-02-22 01:49:46 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the indexes
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"allocs", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// nestedUpdateAllocFromClient is used to nest an update of an allocation with client status
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, index uint64, alloc *structs.Allocation) error {
|
2015-08-26 00:54:45 +00:00
|
|
|
// Look for existing alloc
|
|
|
|
existing, err := txn.First("allocs", "id", alloc.ID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("alloc lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Nothing to do if this does not exist
|
|
|
|
if existing == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
exist := existing.(*structs.Allocation)
|
2016-12-12 21:32:30 +00:00
|
|
|
|
2015-08-26 00:54:45 +00:00
|
|
|
// Copy everything from the existing allocation
|
2017-02-07 00:46:23 +00:00
|
|
|
copyAlloc := exist.Copy()
|
2015-08-26 00:54:45 +00:00
|
|
|
|
|
|
|
// Pull in anything the client is the authority on
|
|
|
|
copyAlloc.ClientStatus = alloc.ClientStatus
|
|
|
|
copyAlloc.ClientDescription = alloc.ClientDescription
|
2015-12-17 23:33:57 +00:00
|
|
|
copyAlloc.TaskStates = alloc.TaskStates
|
2015-08-26 00:54:45 +00:00
|
|
|
|
|
|
|
// Update the modify index
|
|
|
|
copyAlloc.ModifyIndex = index
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.updateSummaryWithAlloc(index, copyAlloc, exist, txn); err != nil {
|
2016-08-02 22:06:39 +00:00
|
|
|
return fmt.Errorf("error updating job summary: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-08-26 00:54:45 +00:00
|
|
|
// Update the allocation
|
|
|
|
if err := txn.Insert("allocs", copyAlloc); err != nil {
|
|
|
|
return fmt.Errorf("alloc insert failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-01-09 02:22:59 +00:00
|
|
|
// Set the job's status
|
|
|
|
forceStatus := ""
|
|
|
|
if !copyAlloc.TerminalStatus() {
|
|
|
|
forceStatus = structs.JobStatusRunning
|
|
|
|
}
|
2016-02-22 01:49:46 +00:00
|
|
|
jobs := map[string]string{exist.JobID: forceStatus}
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.setJobStatuses(index, txn, jobs, false); err != nil {
|
2016-01-09 02:22:59 +00:00
|
|
|
return fmt.Errorf("setting job status failed: %v", err)
|
|
|
|
}
|
2015-08-26 00:54:45 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// UpsertAllocs is used to evict a set of allocations and allocate new ones at
|
|
|
|
// the same time.
|
2015-09-07 03:39:06 +00:00
|
|
|
func (s *StateStore) UpsertAllocs(index uint64, allocs []*structs.Allocation) error {
|
2015-08-04 20:56:41 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
2017-05-05 20:52:01 +00:00
|
|
|
if err := s.upsertAllocsImpl(index, allocs, txn); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
2015-10-29 20:52:15 +00:00
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// upsertAllocsImpl is the actual implementation of UpsertAllocs so that it may
// be used with an existing transaction. For each allocation it fixes up
// indexes, preserves client-owned fields on updates, keeps deployment and
// job summary state in sync, and finally recomputes the status of every
// touched job. It does not commit the transaction.
func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation, txn *memdb.Txn) error {
	// Handle the allocations
	jobs := make(map[string]string, 1)
	for _, alloc := range allocs {
		existing, err := txn.First("allocs", "id", alloc.ID)
		if err != nil {
			return fmt.Errorf("alloc lookup failed: %v", err)
		}
		// exist is nil when this is the first upsert of the allocation.
		exist, _ := existing.(*structs.Allocation)

		if exist == nil {
			alloc.CreateIndex = index
			alloc.ModifyIndex = index
			alloc.AllocModifyIndex = index

			// Issue https://github.com/hashicorp/nomad/issues/2583 uncovered
			// a race between a forced garbage collection and the scheduler
			// marking an allocation as terminal. The issue is that the
			// allocation from the scheduler has its job normalized and the FSM
			// will only denormalize if the allocation is not terminal. However
			// if the allocation is garbage collected, that will result in an
			// allocation being upserted for the first time without a job
			// attached. By returning an error here, it will cause the FSM to
			// error, causing the plan_apply to error and thus causing the
			// evaluation to be failed. This will force an index refresh that
			// should solve this issue.
			if alloc.Job == nil {
				return fmt.Errorf("attempting to upsert allocation %q without a job", alloc.ID)
			}
		} else {
			// Preserve the original create index on update.
			alloc.CreateIndex = exist.CreateIndex
			alloc.ModifyIndex = index
			alloc.AllocModifyIndex = index

			// Keep the clients task states
			alloc.TaskStates = exist.TaskStates

			// If the scheduler is marking this allocation as lost we do not
			// want to reuse the status of the existing allocation.
			if alloc.ClientStatus != structs.AllocClientStatusLost {
				alloc.ClientStatus = exist.ClientStatus
				alloc.ClientDescription = exist.ClientDescription
			}

			// The job has been denormalized so re-attach the original job
			if alloc.Job == nil {
				alloc.Job = exist.Job
			}
		}

		// Keep any deployment this allocation belongs to in sync.
		if err := s.updateDeploymentWithAlloc(index, alloc, exist, txn); err != nil {
			return fmt.Errorf("error updating deployment: %v", err)
		}

		if err := s.updateSummaryWithAlloc(index, alloc, exist, txn); err != nil {
			return fmt.Errorf("error updating job summary: %v", err)
		}

		// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
		// COMPAT 0.4.1 -> 0.5
		if alloc.Job != nil {
			s.addEphemeralDiskToTaskGroups(alloc.Job)
		}

		if err := txn.Insert("allocs", alloc); err != nil {
			return fmt.Errorf("alloc insert failed: %v", err)
		}

		// If the allocation is running, force the job to running status.
		forceStatus := ""
		if !alloc.TerminalStatus() {
			forceStatus = structs.JobStatusRunning
		}
		jobs[alloc.JobID] = forceStatus
	}

	// Update the indexes
	if err := txn.Insert("index", &IndexEntry{"allocs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	// Set the job's status
	if err := s.setJobStatuses(index, txn, jobs, false); err != nil {
		return fmt.Errorf("setting job status failed: %v", err)
	}

	return nil
}
|
|
|
|
|
2015-09-07 03:51:01 +00:00
|
|
|
// AllocByID is used to lookup an allocation by its ID
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) AllocByID(ws memdb.WatchSet, id string) (*structs.Allocation, error) {
|
2015-08-04 20:56:41 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("allocs", "id", id)
|
2015-08-04 20:56:41 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("alloc lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(watchCh)
|
|
|
|
|
2015-08-04 20:56:41 +00:00
|
|
|
if existing != nil {
|
2015-12-19 20:05:17 +00:00
|
|
|
return existing.(*structs.Allocation), nil
|
2015-08-04 20:56:41 +00:00
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2015-12-22 22:44:33 +00:00
|
|
|
// AllocsByIDPrefix is used to lookup allocs by prefix
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) AllocsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
|
2015-12-19 20:05:17 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("allocs", "id_prefix", id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("alloc lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-12-19 20:05:17 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2015-08-04 23:32:46 +00:00
|
|
|
// AllocsByNode returns all the allocations by node
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) AllocsByNode(ws memdb.WatchSet, node string) ([]*structs.Allocation, error) {
|
2015-08-04 23:32:46 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2016-02-20 19:24:06 +00:00
|
|
|
// Get an iterator over the node allocations, using only the
|
|
|
|
// node prefix which ignores the terminal status
|
|
|
|
iter, err := txn.Get("allocs", "node_prefix", node)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2016-02-20 19:24:06 +00:00
|
|
|
var out []*structs.Allocation
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
out = append(out, raw.(*structs.Allocation))
|
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// AllocsByNode returns all the allocations by node and terminal status
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) AllocsByNodeTerminal(ws memdb.WatchSet, node string, terminal bool) ([]*structs.Allocation, error) {
|
2016-02-20 19:24:06 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2015-08-04 23:32:46 +00:00
|
|
|
// Get an iterator over the node allocations
|
2016-02-20 19:24:06 +00:00
|
|
|
iter, err := txn.Get("allocs", "node", node, terminal)
|
2015-08-04 23:32:46 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-08-07 00:36:10 +00:00
|
|
|
var out []*structs.Allocation
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
out = append(out, raw.(*structs.Allocation))
|
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// AllocsByJob returns all the allocations by job id
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) AllocsByJob(ws memdb.WatchSet, jobID string, all bool) ([]*structs.Allocation, error) {
|
2015-08-07 00:36:10 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2016-11-24 12:20:52 +00:00
|
|
|
// Get the job
|
|
|
|
var job *structs.Job
|
|
|
|
rawJob, err := txn.First("jobs", "id", jobID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if rawJob != nil {
|
|
|
|
job = rawJob.(*structs.Job)
|
|
|
|
}
|
|
|
|
|
2015-08-07 00:36:10 +00:00
|
|
|
// Get an iterator over the node allocations
|
|
|
|
iter, err := txn.Get("allocs", "job", jobID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-08-15 22:39:29 +00:00
|
|
|
var out []*structs.Allocation
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
2016-11-24 12:20:52 +00:00
|
|
|
|
|
|
|
alloc := raw.(*structs.Allocation)
|
2016-12-20 02:10:02 +00:00
|
|
|
// If the allocation belongs to a job with the same ID but a different
|
|
|
|
// create index and we are not getting all the allocations whose Jobs
|
|
|
|
// matches the same Job ID then we skip it
|
2016-11-24 12:20:52 +00:00
|
|
|
if !all && job != nil && alloc.Job.CreateIndex != job.CreateIndex {
|
|
|
|
continue
|
|
|
|
}
|
2015-08-15 22:39:29 +00:00
|
|
|
out = append(out, raw.(*structs.Allocation))
|
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// AllocsByEval returns all the allocations by eval id
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) AllocsByEval(ws memdb.WatchSet, evalID string) ([]*structs.Allocation, error) {
|
2015-08-15 22:39:29 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Get an iterator over the eval allocations
|
|
|
|
iter, err := txn.Get("allocs", "eval", evalID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-08-04 23:32:46 +00:00
|
|
|
var out []*structs.Allocation
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
out = append(out, raw.(*structs.Allocation))
|
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2015-08-04 20:56:41 +00:00
|
|
|
// Allocs returns an iterator over all the evaluations
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) Allocs(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2015-08-04 20:56:41 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire table
|
|
|
|
iter, err := txn.Get("allocs", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2015-08-04 20:56:41 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2016-08-19 01:14:58 +00:00
|
|
|
// UpsertVaultAccessors is used to register a set of Vault Accessors
|
|
|
|
func (s *StateStore) UpsertVaultAccessor(index uint64, accessors []*structs.VaultAccessor) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
for _, accessor := range accessors {
|
|
|
|
// Set the create index
|
|
|
|
accessor.CreateIndex = index
|
|
|
|
|
|
|
|
// Insert the accessor
|
|
|
|
if err := txn.Insert("vault_accessors", accessor); err != nil {
|
|
|
|
return fmt.Errorf("accessor insert failed: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"vault_accessors", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
// DeleteVaultAccessors is used to delete a set of Vault Accessors
|
|
|
|
func (s *StateStore) DeleteVaultAccessors(index uint64, accessors []*structs.VaultAccessor) error {
|
2016-08-19 01:14:58 +00:00
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
// Lookup the accessor
|
2016-08-22 20:57:27 +00:00
|
|
|
for _, accessor := range accessors {
|
|
|
|
// Delete the accessor
|
|
|
|
if err := txn.Delete("vault_accessors", accessor); err != nil {
|
|
|
|
return fmt.Errorf("accessor delete failed: %v", err)
|
|
|
|
}
|
2016-08-19 01:14:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"vault_accessors", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// VaultAccessor returns the given Vault accessor
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) VaultAccessor(ws memdb.WatchSet, accessor string) (*structs.VaultAccessor, error) {
|
2016-08-19 01:14:58 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
watchCh, existing, err := txn.FirstWatch("vault_accessors", "id", accessor)
|
2016-08-19 01:14:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("accessor lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(watchCh)
|
|
|
|
|
2016-08-19 01:14:58 +00:00
|
|
|
if existing != nil {
|
|
|
|
return existing.(*structs.VaultAccessor), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// VaultAccessors returns an iterator of Vault accessors.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) VaultAccessors(ws memdb.WatchSet) (memdb.ResultIterator, error) {
|
2016-08-19 01:14:58 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
iter, err := txn.Get("vault_accessors", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-02-05 20:45:57 +00:00
|
|
|
|
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2016-08-19 01:14:58 +00:00
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// VaultAccessorsByAlloc returns all the Vault accessors by alloc id
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) VaultAccessorsByAlloc(ws memdb.WatchSet, allocID string) ([]*structs.VaultAccessor, error) {
|
2016-08-19 01:14:58 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Get an iterator over the accessors
|
|
|
|
iter, err := txn.Get("vault_accessors", "alloc_id", allocID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2016-08-19 01:14:58 +00:00
|
|
|
var out []*structs.VaultAccessor
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
out = append(out, raw.(*structs.VaultAccessor))
|
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// VaultAccessorsByNode returns all the Vault accessors by node id
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) VaultAccessorsByNode(ws memdb.WatchSet, nodeID string) ([]*structs.VaultAccessor, error) {
|
2016-08-19 01:14:58 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Get an iterator over the accessors
|
|
|
|
iter, err := txn.Get("vault_accessors", "node_id", nodeID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
ws.Add(iter.WatchCh())
|
|
|
|
|
2016-08-19 01:14:58 +00:00
|
|
|
var out []*structs.VaultAccessor
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
out = append(out, raw.(*structs.VaultAccessor))
|
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2016-05-21 01:07:10 +00:00
|
|
|
// LastIndex returns the greatest index value for all indexes
|
|
|
|
func (s *StateStore) LatestIndex() (uint64, error) {
|
|
|
|
indexes, err := s.Indexes()
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var max uint64 = 0
|
|
|
|
for {
|
|
|
|
raw := indexes.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Prepare the request struct
|
|
|
|
idx := raw.(*IndexEntry)
|
|
|
|
|
|
|
|
// Determine the max
|
|
|
|
if idx.Value > max {
|
|
|
|
max = idx.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return max, nil
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:51:01 +00:00
|
|
|
// Index finds the matching index value
|
|
|
|
func (s *StateStore) Index(name string) (uint64, error) {
|
2015-07-06 21:30:43 +00:00
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Lookup the first matching index
|
|
|
|
out, err := txn.First("index", "id", name)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
if out == nil {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
return out.(*IndexEntry).Value, nil
|
|
|
|
}
|
|
|
|
|
2016-08-03 18:58:36 +00:00
|
|
|
// RemoveIndex is a helper method to remove an index for testing purposes
|
|
|
|
func (s *StateStore) RemoveIndex(name string) error {
|
|
|
|
txn := s.db.Txn(true)
|
|
|
|
defer txn.Abort()
|
|
|
|
|
|
|
|
if _, err := txn.DeleteAll("index", "id", name); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.Commit()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-07-06 21:30:43 +00:00
|
|
|
// Indexes returns an iterator over all the indexes
|
|
|
|
func (s *StateStore) Indexes() (memdb.ResultIterator, error) {
|
|
|
|
txn := s.db.Txn(false)
|
|
|
|
|
|
|
|
// Walk the entire nodes table
|
|
|
|
iter, err := txn.Get("index", "id")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return iter, nil
|
|
|
|
}
|
|
|
|
|
2016-08-03 18:58:36 +00:00
|
|
|
// ReconcileJobSummaries re-creates summaries for all jobs present in the state
// store. Each summary is rebuilt from scratch by bucketing the job's current
// allocations by client status, then the job_summary table index is bumped to
// the given Raft index.
func (s *StateStore) ReconcileJobSummaries(index uint64) error {
	txn := s.db.Txn(true)
	defer txn.Abort()

	// Get all the jobs
	iter, err := txn.Get("jobs", "id")
	if err != nil {
		return err
	}
	for {
		rawJob := iter.Next()
		if rawJob == nil {
			break
		}
		job := rawJob.(*structs.Job)

		// Create a job summary for the job, seeded with an empty (all-zero)
		// entry for every task group the job defines.
		summary := &structs.JobSummary{
			JobID:   job.ID,
			Summary: make(map[string]structs.TaskGroupSummary),
		}
		for _, tg := range job.TaskGroups {
			summary.Summary[tg.Name] = structs.TaskGroupSummary{}
		}

		// Find all the allocations for the jobs
		iterAllocs, err := txn.Get("allocs", "job", job.ID)
		if err != nil {
			return err
		}

		// Calculate the summary for the job
		for {
			rawAlloc := iterAllocs.Next()
			if rawAlloc == nil {
				break
			}
			alloc := rawAlloc.(*structs.Allocation)

			// Ignore the allocation if it doesn't belong to the currently
			// registered job. The allocation is checked because of issue #2304
			if alloc.Job == nil || alloc.Job.CreateIndex != job.CreateIndex {
				continue
			}

			// Bucket the allocation by its client status.
			tg := summary.Summary[alloc.TaskGroup]
			switch alloc.ClientStatus {
			case structs.AllocClientStatusFailed:
				tg.Failed += 1
			case structs.AllocClientStatusLost:
				tg.Lost += 1
			case structs.AllocClientStatusComplete:
				tg.Complete += 1
			case structs.AllocClientStatusRunning:
				tg.Running += 1
			case structs.AllocClientStatusPending:
				tg.Starting += 1
			default:
				s.logger.Printf("[ERR] state_store: invalid client status: %v in allocation %q", alloc.ClientStatus, alloc.ID)
			}
			// TaskGroupSummary is a value type, so write the updated copy back.
			summary.Summary[alloc.TaskGroup] = tg
		}

		// Set the create index of the summary same as the job's create index
		// and the modify index to the current index
		summary.CreateIndex = job.CreateIndex
		summary.ModifyIndex = index

		// Insert the job summary
		if err := txn.Insert("job_summary", summary); err != nil {
			return fmt.Errorf("error inserting job summary: %v", err)
		}
	}

	// Update the indexes table for job summary
	if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}
	txn.Commit()
	return nil
}
|
|
|
|
|
2016-01-09 02:22:59 +00:00
|
|
|
// setJobStatuses is a helper for calling setJobStatus on multiple jobs by ID.
|
|
|
|
// It takes a map of job IDs to an optional forceStatus string. It returns an
|
|
|
|
// error if the job doesn't exist or setJobStatus fails.
|
2017-02-05 20:45:57 +00:00
|
|
|
func (s *StateStore) setJobStatuses(index uint64, txn *memdb.Txn,
|
2016-01-09 02:22:59 +00:00
|
|
|
jobs map[string]string, evalDelete bool) error {
|
|
|
|
for job, forceStatus := range jobs {
|
|
|
|
existing, err := txn.First("jobs", "id", job)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("job lookup failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if existing == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
if err := s.setJobStatus(index, txn, existing.(*structs.Job), evalDelete, forceStatus); err != nil {
|
2016-01-09 02:22:59 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// setJobStatus sets the status of the job by looking up associated evaluations
// and allocations. evalDelete should be set to true if setJobStatus is being
// called because an evaluation is being deleted (potentially because of garbage
// collection). If forceStatus is non-empty, the job's status will be set to the
// passed status. If the job has a parent, the parent's children summary is
// adjusted to reflect the status transition.
func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn,
	job *structs.Job, evalDelete bool, forceStatus string) error {

	// Capture the current status so we can check if there is a change
	oldStatus := job.Status
	// A job first inserted at this same index has no prior status; treating
	// it as "" ensures the children-summary decrement below is skipped.
	if index == job.CreateIndex {
		oldStatus = ""
	}
	newStatus := forceStatus

	// If forceStatus is not set, compute the jobs status.
	if forceStatus == "" {
		var err error
		newStatus, err = s.getJobStatus(txn, job, evalDelete)
		if err != nil {
			return err
		}
	}

	// Fast-path if nothing has changed.
	if oldStatus == newStatus {
		return nil
	}

	// Copy and update the existing job
	updated := job.Copy()
	updated.Status = newStatus
	updated.ModifyIndex = index

	// Insert the job
	if err := txn.Insert("jobs", updated); err != nil {
		return fmt.Errorf("job insert failed: %v", err)
	}
	if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	// Update the children summary
	if updated.ParentID != "" {
		// Try to update the summary of the parent job summary
		summaryRaw, err := txn.First("job_summary", "id", updated.ParentID)
		if err != nil {
			return fmt.Errorf("unable to retrieve summary for parent job: %v", err)
		}

		// Only continue if the summary exists. It could not exist if the parent
		// job was removed
		if summaryRaw != nil {
			existing := summaryRaw.(*structs.JobSummary)
			pSummary := existing.Copy()
			if pSummary.Children == nil {
				pSummary.Children = new(structs.JobChildrenSummary)
			}

			// Determine the transition and update the correct fields
			children := pSummary.Children

			// Decrement old status
			if oldStatus != "" {
				switch oldStatus {
				case structs.JobStatusPending:
					children.Pending--
				case structs.JobStatusRunning:
					children.Running--
				case structs.JobStatusDead:
					children.Dead--
				default:
					return fmt.Errorf("unknown old job status %q", oldStatus)
				}
			}

			// Increment new status
			switch newStatus {
			case structs.JobStatusPending:
				children.Pending++
			case structs.JobStatusRunning:
				children.Running++
			case structs.JobStatusDead:
				children.Dead++
			default:
				return fmt.Errorf("unknown new job status %q", newStatus)
			}

			// Update the index
			pSummary.ModifyIndex = index

			// Insert the summary
			if err := txn.Insert("job_summary", pSummary); err != nil {
				return fmt.Errorf("job summary insert failed: %v", err)
			}
			if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
				return fmt.Errorf("index update failed: %v", err)
			}
		}
	}

	return nil
}
|
2016-01-09 02:22:59 +00:00
|
|
|
|
2016-01-12 01:34:25 +00:00
|
|
|
// getJobStatus computes a job's status from its allocations and evaluations:
// running if any allocation is non-terminal, pending if any evaluation is
// non-terminal, dead when everything associated is terminal (or evals were
// deleted by GC), and otherwise pending for a brand-new job. Periodic and
// parameterized jobs never receive allocations/evaluations directly, so they
// report running (or dead when stopped).
func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete bool) (string, error) {
	allocs, err := txn.Get("allocs", "job", job.ID)
	if err != nil {
		return "", err
	}

	// If there is a non-terminal allocation, the job is running.
	hasAlloc := false
	for alloc := allocs.Next(); alloc != nil; alloc = allocs.Next() {
		hasAlloc = true
		if !alloc.(*structs.Allocation).TerminalStatus() {
			return structs.JobStatusRunning, nil
		}
	}

	// The job_prefix index can return evals for other jobs sharing the
	// prefix, so exact matches are filtered below.
	evals, err := txn.Get("evals", "job_prefix", job.ID)
	if err != nil {
		return "", err
	}

	hasEval := false
	for raw := evals.Next(); raw != nil; raw = evals.Next() {
		e := raw.(*structs.Evaluation)

		// Filter non-exact matches
		if e.JobID != job.ID {
			continue
		}

		hasEval = true
		if !e.TerminalStatus() {
			return structs.JobStatusPending, nil
		}
	}

	// The job is dead if all the allocations and evals are terminal or if there
	// are no evals because of garbage collection.
	if evalDelete || hasEval || hasAlloc {
		return structs.JobStatusDead, nil
	}

	// If there are no allocations or evaluations it is a new job. If the
	// job is periodic or is a parameterized job, we mark it as running as
	// it will never have an allocation/evaluation against it.
	if job.IsPeriodic() || job.IsParameterized() {
		// If the job is stopped mark it as dead
		if job.Stop {
			return structs.JobStatusDead, nil
		}

		return structs.JobStatusRunning, nil
	}
	return structs.JobStatusPending, nil
}
|
|
|
|
|
2016-07-03 03:04:02 +00:00
|
|
|
// updateSummaryWithJob creates or updates job summaries when new jobs are
|
|
|
|
// upserted or existing ones are updated
|
2016-07-20 21:09:03 +00:00
|
|
|
func (s *StateStore) updateSummaryWithJob(index uint64, job *structs.Job,
|
2017-02-05 20:45:57 +00:00
|
|
|
txn *memdb.Txn) error {
|
2016-07-19 23:15:57 +00:00
|
|
|
|
2017-01-06 18:34:55 +00:00
|
|
|
// Update the job summary
|
|
|
|
summaryRaw, err := txn.First("job_summary", "id", job.ID)
|
2016-06-30 19:04:22 +00:00
|
|
|
if err != nil {
|
2017-01-06 18:34:55 +00:00
|
|
|
return fmt.Errorf("job summary lookup failed: %v", err)
|
2016-06-30 19:04:22 +00:00
|
|
|
}
|
2017-01-06 18:34:55 +00:00
|
|
|
|
|
|
|
// Get the summary or create if necessary
|
|
|
|
var summary *structs.JobSummary
|
|
|
|
hasSummaryChanged := false
|
|
|
|
if summaryRaw != nil {
|
|
|
|
summary = summaryRaw.(*structs.JobSummary).Copy()
|
|
|
|
} else {
|
|
|
|
summary = &structs.JobSummary{
|
2016-07-18 23:51:47 +00:00
|
|
|
JobID: job.ID,
|
|
|
|
Summary: make(map[string]structs.TaskGroupSummary),
|
2016-12-15 00:58:54 +00:00
|
|
|
Children: new(structs.JobChildrenSummary),
|
2016-07-18 23:51:47 +00:00
|
|
|
CreateIndex: index,
|
2016-06-30 19:04:22 +00:00
|
|
|
}
|
2016-07-18 23:51:47 +00:00
|
|
|
hasSummaryChanged = true
|
2016-06-30 19:04:22 +00:00
|
|
|
}
|
2017-01-06 18:34:55 +00:00
|
|
|
|
2016-06-30 19:04:22 +00:00
|
|
|
for _, tg := range job.TaskGroups {
|
2017-01-06 18:34:55 +00:00
|
|
|
if _, ok := summary.Summary[tg.Name]; !ok {
|
2016-06-30 19:04:22 +00:00
|
|
|
newSummary := structs.TaskGroupSummary{
|
|
|
|
Complete: 0,
|
|
|
|
Failed: 0,
|
|
|
|
Running: 0,
|
|
|
|
Starting: 0,
|
|
|
|
}
|
2017-01-06 18:34:55 +00:00
|
|
|
summary.Summary[tg.Name] = newSummary
|
2016-07-18 23:51:47 +00:00
|
|
|
hasSummaryChanged = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-05 20:45:57 +00:00
|
|
|
// The job summary has changed, so update the modify index.
|
2016-07-18 23:51:47 +00:00
|
|
|
if hasSummaryChanged {
|
2017-01-06 18:34:55 +00:00
|
|
|
summary.ModifyIndex = index
|
2016-07-18 23:51:47 +00:00
|
|
|
|
|
|
|
// Update the indexes table for job summary
|
|
|
|
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
|
|
|
return fmt.Errorf("index update failed: %v", err)
|
2016-06-30 19:04:22 +00:00
|
|
|
}
|
2017-01-06 18:34:55 +00:00
|
|
|
if err := txn.Insert("job_summary", summary); err != nil {
|
2016-07-22 06:13:07 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-06-30 19:04:22 +00:00
|
|
|
}
|
2016-07-19 23:15:57 +00:00
|
|
|
|
2016-06-30 19:04:22 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-05 20:52:01 +00:00
|
|
|
// updateDeploymentWithAlloc is used to update the deployment state associated
// with the given allocation. It adjusts the task group's placed/healthy/
// unhealthy counters and fails the deployment when an allocation is marked
// unhealthy. existing is the prior version of the allocation, or nil when the
// allocation is being placed for the first time.
func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *structs.Allocation, txn *memdb.Txn) error {
	// Nothing to do if the allocation is not associated with a deployment
	if alloc.DeploymentID == "" {
		return nil
	}

	// Get the deployment
	ws := memdb.NewWatchSet()
	deployment, err := s.deploymentByIDImpl(ws, alloc.DeploymentID, txn)
	if err != nil {
		return err
	}
	if deployment == nil {
		return fmt.Errorf("allocation %q references unknown deployment %q", alloc.ID, alloc.DeploymentID)
	}

	// Retrieve the deployment state object
	_, ok := deployment.TaskGroups[alloc.TaskGroup]
	if !ok {
		// If the task group isn't part of the deployment, the task group wasn't
		// part of a rolling update so nothing to do
		return nil
	}

	// Do not modify in-place. Instead keep track of what must be done
	placed := 0

	// TODO test when I am sure of what this method will do
	// XXX Unclear whether this will be helpful because a separate code path is
	// likely needed for setting health
	healthy := 0
	unhealthy := 0

	// If there was no existing allocation, this is a placement and we increment
	// the placement
	existingHealthSet := existing != nil && existing.DeploymentStatus != nil && existing.DeploymentStatus.Healthy != nil
	allocHealthSet := alloc.DeploymentStatus != nil && alloc.DeploymentStatus.Healthy != nil
	if existing == nil {
		placed++
	} else if !existingHealthSet && allocHealthSet {
		// Health was just set for the first time.
		if *alloc.DeploymentStatus.Healthy {
			healthy++
		} else {
			unhealthy++
		}
	} else if existingHealthSet && allocHealthSet {
		// See if it has gone from healthy to unhealthy
		if *existing.DeploymentStatus.Healthy && !*alloc.DeploymentStatus.Healthy {
			healthy--
			unhealthy++
		}
	}

	// Nothing to do
	if placed == 0 && healthy == 0 && unhealthy == 0 {
		return nil
	}

	// Create a copy of the deployment object
	deploymentCopy := deployment.Copy()
	deploymentCopy.ModifyIndex = index

	// Any unhealthy allocation fails the whole deployment.
	if unhealthy != 0 {
		deploymentCopy.Status = structs.DeploymentStatusFailed
		deploymentCopy.StatusDescription = "Allocation(s) marked as unhealthy"
	}

	// Apply the accumulated deltas to the task group's state.
	state := deploymentCopy.TaskGroups[alloc.TaskGroup]
	state.PlacedAllocs += placed
	state.HealthyAllocs += healthy
	state.UnhealthyAllocs += unhealthy

	// Upsert the new deployment
	if err := s.upsertDeploymentImpl(index, deploymentCopy, false, txn); err != nil {
		return err
	}

	return nil
}
|
|
|
|
|
2016-07-03 03:04:02 +00:00
|
|
|
// updateSummaryWithAlloc updates the job summary when allocations are updated
// or inserted. existingAlloc is the prior version of the allocation, or nil
// when the allocation is new; the task group counters are moved between
// client-status buckets accordingly.
func (s *StateStore) updateSummaryWithAlloc(index uint64, alloc *structs.Allocation,
	existingAlloc *structs.Allocation, txn *memdb.Txn) error {

	// We don't have to update the summary if the job is missing
	if alloc.Job == nil {
		return nil
	}

	summaryRaw, err := txn.First("job_summary", "id", alloc.JobID)
	if err != nil {
		return fmt.Errorf("unable to lookup job summary for job id %q: %v", alloc.JobID, err)
	}

	if summaryRaw == nil {
		// Check if the job is de-registered
		rawJob, err := txn.First("jobs", "id", alloc.JobID)
		if err != nil {
			return fmt.Errorf("unable to query job: %v", err)
		}

		// If the job is de-registered then we skip updating its summary
		if rawJob == nil {
			return nil
		}

		// The job exists but has no summary: that is an invariant violation.
		return fmt.Errorf("job summary for job %q is not present", alloc.JobID)
	}

	// Get a copy of the existing summary
	jobSummary := summaryRaw.(*structs.JobSummary).Copy()

	// Not updating the job summary because the allocation doesn't belong to the
	// currently registered job
	if jobSummary.CreateIndex != alloc.Job.CreateIndex {
		return nil
	}

	tgSummary, ok := jobSummary.Summary[alloc.TaskGroup]
	if !ok {
		return fmt.Errorf("unable to find task group in the job summary: %v", alloc.TaskGroup)
	}

	summaryChanged := false
	if existingAlloc == nil {
		// New allocation: it should arrive pending; anything else is logged
		// as an error but not counted.
		switch alloc.DesiredStatus {
		case structs.AllocDesiredStatusStop, structs.AllocDesiredStatusEvict:
			s.logger.Printf("[ERR] state_store: new allocation inserted into state store with id: %v and state: %v",
				alloc.ID, alloc.DesiredStatus)
		}
		switch alloc.ClientStatus {
		case structs.AllocClientStatusPending:
			tgSummary.Starting += 1
			if tgSummary.Queued > 0 {
				tgSummary.Queued -= 1
			}
			summaryChanged = true
		case structs.AllocClientStatusRunning, structs.AllocClientStatusFailed,
			structs.AllocClientStatusComplete:
			s.logger.Printf("[ERR] state_store: new allocation inserted into state store with id: %v and state: %v",
				alloc.ID, alloc.ClientStatus)
		}
	} else if existingAlloc.ClientStatus != alloc.ClientStatus {
		// Incrementing the count of the bin of the current state
		switch alloc.ClientStatus {
		case structs.AllocClientStatusRunning:
			tgSummary.Running += 1
		case structs.AllocClientStatusFailed:
			tgSummary.Failed += 1
		case structs.AllocClientStatusPending:
			tgSummary.Starting += 1
		case structs.AllocClientStatusComplete:
			tgSummary.Complete += 1
		case structs.AllocClientStatusLost:
			tgSummary.Lost += 1
		}

		// Decrementing the count of the bin of the last state
		switch existingAlloc.ClientStatus {
		case structs.AllocClientStatusRunning:
			tgSummary.Running -= 1
		case structs.AllocClientStatusPending:
			tgSummary.Starting -= 1
		case structs.AllocClientStatusLost:
			tgSummary.Lost -= 1
		case structs.AllocClientStatusFailed, structs.AllocClientStatusComplete:
			// Failed/Complete are terminal buckets and are never decremented.
		default:
			s.logger.Printf("[ERR] state_store: invalid old state of allocation with id: %v, and state: %v",
				existingAlloc.ID, existingAlloc.ClientStatus)
		}
		summaryChanged = true
	}
	// TaskGroupSummary is a value type, so write the updated copy back.
	jobSummary.Summary[alloc.TaskGroup] = tgSummary

	if summaryChanged {
		jobSummary.ModifyIndex = index

		// Update the indexes table for job summary
		if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
			return fmt.Errorf("index update failed: %v", err)
		}

		if err := txn.Insert("job_summary", jobSummary); err != nil {
			return fmt.Errorf("updating job summary failed: %v", err)
		}
	}

	return nil
}
|
|
|
|
|
2016-09-14 22:43:42 +00:00
|
|
|
// addEphemeralDiskToTaskGroups adds missing EphemeralDisk objects to TaskGroups
|
|
|
|
func (s *StateStore) addEphemeralDiskToTaskGroups(job *structs.Job) {
|
2016-09-02 00:41:50 +00:00
|
|
|
for _, tg := range job.TaskGroups {
|
|
|
|
var diskMB int
|
|
|
|
for _, task := range tg.Tasks {
|
|
|
|
if task.Resources != nil {
|
|
|
|
diskMB += task.Resources.DiskMB
|
|
|
|
task.Resources.DiskMB = 0
|
|
|
|
}
|
|
|
|
}
|
2016-11-08 23:24:51 +00:00
|
|
|
if tg.EphemeralDisk != nil {
|
|
|
|
continue
|
|
|
|
}
|
2016-09-14 22:43:42 +00:00
|
|
|
tg.EphemeralDisk = &structs.EphemeralDisk{
|
|
|
|
SizeMB: diskMB,
|
2016-09-02 00:41:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-29 01:11:55 +00:00
|
|
|
// StateSnapshot is used to provide a point-in-time snapshot
// of the state store. It embeds StateStore, so all of the
// store's read methods are available directly on the snapshot.
type StateSnapshot struct {
	StateStore
}
|
|
|
|
|
|
|
|
// StateRestore is used to optimize the performance when
// restoring state by only using a single large transaction
// instead of thousands of sub transactions
type StateRestore struct {
	// txn is the single memdb transaction that every XxxRestore
	// method inserts into; it is finished via Abort or Commit.
	txn *memdb.Txn
}
|
|
|
|
|
|
|
|
// Abort is used to abort the restore operation
|
|
|
|
func (s *StateRestore) Abort() {
|
|
|
|
s.txn.Abort()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Commit is used to commit the restore operation
|
|
|
|
func (s *StateRestore) Commit() {
|
|
|
|
s.txn.Commit()
|
|
|
|
}
|
|
|
|
|
2015-07-04 17:16:52 +00:00
|
|
|
// NodeRestore is used to restore a node
|
|
|
|
func (r *StateRestore) NodeRestore(node *structs.Node) error {
|
|
|
|
if err := r.txn.Insert("nodes", node); err != nil {
|
|
|
|
return fmt.Errorf("node insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2015-07-06 21:51:01 +00:00
|
|
|
|
2015-07-07 16:41:05 +00:00
|
|
|
// JobRestore is used to restore a job
|
|
|
|
func (r *StateRestore) JobRestore(job *structs.Job) error {
|
2016-09-14 22:43:42 +00:00
|
|
|
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
|
2016-09-02 00:41:50 +00:00
|
|
|
// COMPAT 0.4.1 -> 0.5
|
2016-09-14 22:43:42 +00:00
|
|
|
r.addEphemeralDiskToTaskGroups(job)
|
2016-08-25 18:00:20 +00:00
|
|
|
|
2015-07-07 16:41:05 +00:00
|
|
|
if err := r.txn.Insert("jobs", job); err != nil {
|
2015-07-23 22:15:48 +00:00
|
|
|
return fmt.Errorf("job insert failed: %v", err)
|
2015-07-07 16:41:05 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-07-23 22:43:06 +00:00
|
|
|
// EvalRestore is used to restore an evaluation
|
|
|
|
func (r *StateRestore) EvalRestore(eval *structs.Evaluation) error {
|
|
|
|
if err := r.txn.Insert("evals", eval); err != nil {
|
|
|
|
return fmt.Errorf("eval insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-08-04 20:56:41 +00:00
|
|
|
// AllocRestore is used to restore an allocation
|
|
|
|
func (r *StateRestore) AllocRestore(alloc *structs.Allocation) error {
|
2016-08-29 19:49:52 +00:00
|
|
|
// Set the shared resources if it's not present
|
2016-09-02 00:41:50 +00:00
|
|
|
// COMPAT 0.4.1 -> 0.5
|
2016-08-29 19:49:52 +00:00
|
|
|
if alloc.SharedResources == nil {
|
|
|
|
alloc.SharedResources = &structs.Resources{
|
|
|
|
DiskMB: alloc.Resources.DiskMB,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-14 22:43:42 +00:00
|
|
|
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
|
2016-09-02 00:41:50 +00:00
|
|
|
if alloc.Job != nil {
|
2016-09-14 22:43:42 +00:00
|
|
|
r.addEphemeralDiskToTaskGroups(alloc.Job)
|
2016-09-02 00:41:50 +00:00
|
|
|
}
|
|
|
|
|
2015-08-04 20:56:41 +00:00
|
|
|
if err := r.txn.Insert("allocs", alloc); err != nil {
|
|
|
|
return fmt.Errorf("alloc insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-07-23 22:43:06 +00:00
|
|
|
// IndexRestore is used to restore an index
|
2015-07-06 21:51:01 +00:00
|
|
|
func (r *StateRestore) IndexRestore(idx *IndexEntry) error {
|
|
|
|
if err := r.txn.Insert("index", idx); err != nil {
|
|
|
|
return fmt.Errorf("index insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2015-10-29 21:47:39 +00:00
|
|
|
|
2015-12-07 23:58:17 +00:00
|
|
|
// PeriodicLaunchRestore is used to restore a periodic launch.
|
|
|
|
func (r *StateRestore) PeriodicLaunchRestore(launch *structs.PeriodicLaunch) error {
|
|
|
|
if err := r.txn.Insert("periodic_launch", launch); err != nil {
|
|
|
|
return fmt.Errorf("periodic launch insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-07-05 18:50:44 +00:00
|
|
|
// JobSummaryRestore is used to restore a job summary
|
|
|
|
func (r *StateRestore) JobSummaryRestore(jobSummary *structs.JobSummary) error {
|
2017-01-06 18:34:55 +00:00
|
|
|
if err := r.txn.Insert("job_summary", jobSummary); err != nil {
|
2016-07-05 18:50:44 +00:00
|
|
|
return fmt.Errorf("job summary insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-24 21:49:23 +00:00
|
|
|
// JobVersionRestore is used to restore a job version
|
|
|
|
func (r *StateRestore) JobVersionRestore(version *structs.Job) error {
|
|
|
|
if err := r.txn.Insert("job_version", version); err != nil {
|
|
|
|
return fmt.Errorf("job version insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeploymentRestore is used to restore a deployment
|
|
|
|
func (r *StateRestore) DeploymentRestore(deployment *structs.Deployment) error {
|
|
|
|
if err := r.txn.Insert("deployment", deployment); err != nil {
|
|
|
|
return fmt.Errorf("deployment insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-19 01:14:58 +00:00
|
|
|
// VaultAccessorRestore is used to restore a vault accessor
|
|
|
|
func (r *StateRestore) VaultAccessorRestore(accessor *structs.VaultAccessor) error {
|
|
|
|
if err := r.txn.Insert("vault_accessors", accessor); err != nil {
|
|
|
|
return fmt.Errorf("vault accessor insert failed: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-09-14 22:43:42 +00:00
|
|
|
// addEphemeralDiskToTaskGroups adds missing EphemeralDisk objects to TaskGroups
|
|
|
|
func (r *StateRestore) addEphemeralDiskToTaskGroups(job *structs.Job) {
|
2016-09-02 00:41:50 +00:00
|
|
|
for _, tg := range job.TaskGroups {
|
2016-09-14 22:43:42 +00:00
|
|
|
if tg.EphemeralDisk != nil {
|
2016-09-02 00:41:50 +00:00
|
|
|
continue
|
|
|
|
}
|
2016-09-14 22:43:42 +00:00
|
|
|
var sizeMB int
|
2016-09-02 00:41:50 +00:00
|
|
|
for _, task := range tg.Tasks {
|
|
|
|
if task.Resources != nil {
|
2016-09-14 22:43:42 +00:00
|
|
|
sizeMB += task.Resources.DiskMB
|
2016-09-02 00:41:50 +00:00
|
|
|
task.Resources.DiskMB = 0
|
|
|
|
}
|
|
|
|
}
|
2016-09-14 22:43:42 +00:00
|
|
|
tg.EphemeralDisk = &structs.EphemeralDisk{
|
|
|
|
SizeMB: sizeMB,
|
2016-09-02 00:41:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|