package nomad

import (
	"fmt"
	"io"
	"log"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/raft"
	"github.com/ugorji/go/codec"
)

const (
	// timeTableGranularity is the granularity of index to time tracking
	timeTableGranularity = 5 * time.Minute

	// timeTableLimit is the maximum limit of our tracking
	timeTableLimit = 72 * time.Hour
)

// SnapshotType is prefixed to a record in the FSM snapshot
// so that we can determine the type for restore
type SnapshotType byte

const (
	NodeSnapshot SnapshotType = iota
	JobSnapshot
	IndexSnapshot
	EvalSnapshot
	AllocSnapshot
	TimeTableSnapshot
	PeriodicLaunchSnapshot
	JobSummarySnapshot
)

// nomadFSM implements a finite state machine that is used
// along with Raft to provide strong consistency. We implement
// this outside the Server to avoid exposing this outside the package.
type nomadFSM struct {
	evalBroker         *EvalBroker
	blockedEvals       *BlockedEvals
	periodicDispatcher *PeriodicDispatch
	logOutput          io.Writer
	logger             *log.Logger
	state              *state.StateStore
	timetable          *TimeTable
}

// nomadSnapshot is used to provide a snapshot of the current
// state in a way that can be accessed concurrently with operations
// that may modify the live state.
type nomadSnapshot struct {
	snap      *state.StateSnapshot
	timetable *TimeTable
}

// snapshotHeader is the first entry in our snapshot
type snapshotHeader struct {
}

// NewFSM is used to construct a new FSM with a blank state
func NewFSM(evalBroker *EvalBroker, periodic *PeriodicDispatch,
	blocked *BlockedEvals, logOutput io.Writer) (*nomadFSM, error) {
	// Create a state store
	state, err := state.NewStateStore(logOutput)
	if err != nil {
		return nil, err
	}

	fsm := &nomadFSM{
		evalBroker:         evalBroker,
		periodicDispatcher: periodic,
		blockedEvals:       blocked,
		logOutput:          logOutput,
		logger:             log.New(logOutput, "", log.LstdFlags),
		state:              state,
		timetable:          NewTimeTable(timeTableGranularity, timeTableLimit),
	}
	return fsm, nil
}

// Close is used to cleanup resources associated with the FSM
func (n *nomadFSM) Close() error {
	return nil
}

// State is used to return a handle to the current state
func (n *nomadFSM) State() *state.StateStore {
	return n.state
}

// TimeTable returns the time table of transactions
func (n *nomadFSM) TimeTable() *TimeTable {
	return n.timetable
}

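// Apply is invoked by Raft once a log entry is committed. It decodes the
// message type from the first byte of the log data and dispatches the
// remaining payload to the matching apply handler.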
func (n *nomadFSM) Apply(log *raft.Log) interface{} {
	buf := log.Data
	msgType := structs.MessageType(buf[0])

	// Witness this write
	n.timetable.Witness(log.Index, time.Now().UTC())

	// Check if this message type should be ignored when unknown. This is
	// used so that new commands can be added with developer control if older
	// versions can safely ignore the command, or if they should crash.
	ignoreUnknown := false
	if msgType&structs.IgnoreUnknownTypeFlag == structs.IgnoreUnknownTypeFlag {
		msgType &= ^structs.IgnoreUnknownTypeFlag
		ignoreUnknown = true
	}

	switch msgType {
	case structs.NodeRegisterRequestType:
		return n.applyUpsertNode(buf[1:], log.Index)
	case structs.NodeDeregisterRequestType:
		return n.applyDeregisterNode(buf[1:], log.Index)
	case structs.NodeUpdateStatusRequestType:
		return n.applyStatusUpdate(buf[1:], log.Index)
	case structs.NodeUpdateDrainRequestType:
		return n.applyDrainUpdate(buf[1:], log.Index)
	case structs.JobRegisterRequestType:
		return n.applyUpsertJob(buf[1:], log.Index)
	case structs.JobDeregisterRequestType:
		return n.applyDeregisterJob(buf[1:], log.Index)
	case structs.EvalUpdateRequestType:
		return n.applyUpdateEval(buf[1:], log.Index)
	case structs.EvalDeleteRequestType:
		return n.applyDeleteEval(buf[1:], log.Index)
	case structs.AllocUpdateRequestType:
		return n.applyAllocUpdate(buf[1:], log.Index)
	case structs.AllocClientUpdateRequestType:
		return n.applyAllocClientUpdate(buf[1:], log.Index)
	default:
		if ignoreUnknown {
			n.logger.Printf("[WARN] nomad.fsm: ignoring unknown message type (%d), upgrade to newer version", msgType)
			return nil
		} else {
			panic(fmt.Errorf("failed to apply request: %#v", buf))
		}
	}
}

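// applyUpsertNode decodes a NodeRegisterRequest and upserts the node into the
// state store, unblocking any evals held on its computed node class if the
// node is in a ready state.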
func (n *nomadFSM) applyUpsertNode(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "register_node"}, time.Now())
	var req structs.NodeRegisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.UpsertNode(index, req.Node); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpsertNode failed: %v", err)
		return err
	}

	// Unblock evals for the node's computed node class if it is in a ready
	// state.
	if req.Node.Status == structs.NodeStatusReady {
		n.blockedEvals.Unblock(req.Node.ComputedClass, index)
	}

	return nil
}

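// applyDeregisterNode decodes a NodeDeregisterRequest and removes the node
// from the state store.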
func (n *nomadFSM) applyDeregisterNode(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_node"}, time.Now())
	var req structs.NodeDeregisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.DeleteNode(index, req.NodeID); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: DeleteNode failed: %v", err)
		return err
	}
	return nil
}

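// applyStatusUpdate decodes a NodeUpdateStatusRequest and updates the node's
// status, unblocking any evals held on its computed node class once the node
// transitions to ready.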
func (n *nomadFSM) applyStatusUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_status_update"}, time.Now())
	var req structs.NodeUpdateStatusRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.UpdateNodeStatus(index, req.NodeID, req.Status); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpdateNodeStatus failed: %v", err)
		return err
	}

	// Unblock evals for the node's computed node class if it is in a ready
	// state.
	if req.Status == structs.NodeStatusReady {
		node, err := n.state.NodeByID(req.NodeID)
		if err != nil {
			n.logger.Printf("[ERR] nomad.fsm: looking up node %q failed: %v", req.NodeID, err)
			return err
		}
		n.blockedEvals.Unblock(node.ComputedClass, index)
	}

	return nil
}

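// applyDrainUpdate decodes a NodeUpdateDrainRequest and updates the node's
// drain flag in the state store.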
func (n *nomadFSM) applyDrainUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_drain_update"}, time.Now())
	var req structs.NodeUpdateDrainRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.UpdateNodeDrain(index, req.NodeID, req.Drain); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpdateNodeDrain failed: %v", err)
		return err
	}
	return nil
}

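// applyUpsertJob decodes a JobRegisterRequest, upserts the job into the state
// store, keeps the periodic dispatcher in sync, and records periodic launch
// times so that missed launches can be detected after a leader election.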
func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "register_job"}, time.Now())
	var req structs.JobRegisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.UpsertJob(index, req.Job); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpsertJob failed: %v", err)
		return err
	}

	// We always add the job to the periodic dispatcher because there is the
	// possibility that the periodic spec was removed and then we should stop
	// tracking it.
	if err := n.periodicDispatcher.Add(req.Job); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: periodicDispatcher.Add failed: %v", err)
		return err
	}

	// If it is periodic, record the time it was inserted. This is necessary for
	// recovering during leader election. It is possible that from the time it
	// is added to when it was supposed to launch, leader election occurs and the
	// job was not launched. In this case, we use the insertion time to
	// determine if a launch was missed.
	if req.Job.IsPeriodic() {
		prevLaunch, err := n.state.PeriodicLaunchByID(req.Job.ID)
		if err != nil {
			n.logger.Printf("[ERR] nomad.fsm: PeriodicLaunchByID failed: %v", err)
			return err
		}

		// Record the insertion time as a launch. We overload the launch table
		// such that the first entry is the insertion time.
		if prevLaunch == nil {
			launch := &structs.PeriodicLaunch{ID: req.Job.ID, Launch: time.Now()}
			if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
				n.logger.Printf("[ERR] nomad.fsm: UpsertPeriodicLaunch failed: %v", err)
				return err
			}
		}
	}

	// Check if the parent job is periodic and mark the launch time.
	parentID := req.Job.ParentID
	if parentID != "" {
		parent, err := n.state.JobByID(parentID)
		if err != nil {
			n.logger.Printf("[ERR] nomad.fsm: JobByID(%v) lookup for parent failed: %v", parentID, err)
			return err
		} else if parent == nil {
			// The parent has been deregistered.
			return nil
		}

		if parent.IsPeriodic() {
			t, err := n.periodicDispatcher.LaunchTime(req.Job.ID)
			if err != nil {
				n.logger.Printf("[ERR] nomad.fsm: LaunchTime(%v) failed: %v", req.Job.ID, err)
				return err
			}

			launch := &structs.PeriodicLaunch{ID: parentID, Launch: t}
			if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
				n.logger.Printf("[ERR] nomad.fsm: UpsertPeriodicLaunch failed: %v", err)
				return err
			}
		}
	}

	return nil
}

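// applyDeregisterJob decodes a JobDeregisterRequest and removes the job from
// the state store, the periodic dispatcher, and the periodic launch table.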
func (n *nomadFSM) applyDeregisterJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_job"}, time.Now())
	var req structs.JobDeregisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.DeleteJob(index, req.JobID); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: DeleteJob failed: %v", err)
		return err
	}

	if err := n.periodicDispatcher.Remove(req.JobID); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: periodicDispatcher.Remove failed: %v", err)
		return err
	}

	// We always delete from the periodic launch table because it is possible that
	// the job was updated to be non-periodic, thus checking if it is periodic
	// doesn't ensure we clean it up properly.
	n.state.DeletePeriodicLaunch(index, req.JobID)

	return nil
}

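// applyUpdateEval decodes an EvalUpdateRequest, upserts the evaluations, and
// either enqueues them on the eval broker or parks them with the blocked
// evals tracker.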
func (n *nomadFSM) applyUpdateEval(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "update_eval"}, time.Now())
	var req structs.EvalUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.UpsertEvals(index, req.Evals); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpsertEvals failed: %v", err)
		return err
	}

	for _, eval := range req.Evals {
		if eval.ShouldEnqueue() {
			n.evalBroker.Enqueue(eval)
		} else if eval.ShouldBlock() {
			n.blockedEvals.Block(eval)
		}
	}
	return nil
}

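// applyDeleteEval decodes an EvalDeleteRequest and deletes the given
// evaluations and allocations from the state store.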
func (n *nomadFSM) applyDeleteEval(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "delete_eval"}, time.Now())
	var req structs.EvalDeleteRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.DeleteEval(index, req.Evals, req.Allocs); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: DeleteEval failed: %v", err)
		return err
	}
	return nil
}

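// applyAllocUpdate decodes an AllocUpdateRequest, denormalizes the job and
// total resources onto each allocation, and upserts the allocations into the
// state store.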
func (n *nomadFSM) applyAllocUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_update"}, time.Now())
	var req structs.AllocUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Attach the job to all the allocations. It is pulled out in the
	// payload to avoid the redundancy of encoding, but should be denormalized
	// prior to being inserted into MemDB.
	if j := req.Job; j != nil {
		for _, alloc := range req.Alloc {
			if alloc.Job == nil {
				alloc.Job = j
			}
		}
	}

	// Calculate the total resources of allocations. It is pulled out in the
	// payload to avoid encoding something that can be computed, but should be
	// denormalized prior to being inserted into MemDB.
	for _, alloc := range req.Alloc {
		if alloc.Resources != nil {
			continue
		}

		alloc.Resources = new(structs.Resources)
		for _, task := range alloc.TaskResources {
			alloc.Resources.Add(task)
		}
	}

	if err := n.state.UpsertAllocs(index, req.Alloc); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpsertAllocs failed: %v", err)
		return err
	}
	return nil
}

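// applyAllocClientUpdate decodes client-reported allocation updates, applies
// them to the state store, and unblocks evals for the node's computed class
// when an allocation completes or fails.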
func (n *nomadFSM) applyAllocClientUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_client_update"}, time.Now())
	var req structs.AllocUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if len(req.Alloc) == 0 {
		return nil
	}

	// Update all the client allocations
	if err := n.state.UpdateAllocsFromClient(index, req.Alloc); err != nil {
		n.logger.Printf("[ERR] nomad.fsm: UpdateAllocFromClient failed: %v", err)
		return err
	}

	// Unblock evals for the node's computed node class if the client has
	// finished running an allocation.
	for _, alloc := range req.Alloc {
		if alloc.ClientStatus == structs.AllocClientStatusComplete ||
			alloc.ClientStatus == structs.AllocClientStatusFailed {
			nodeID := alloc.NodeID
			node, err := n.state.NodeByID(nodeID)
			if err != nil || node == nil {
				n.logger.Printf("[ERR] nomad.fsm: looking up node %q failed: %v", nodeID, err)
				return err
			}
			n.blockedEvals.Unblock(node.ComputedClass, index)
		}
	}

	return nil
}

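// Snapshot returns a point-in-time snapshot of the FSM state that can be
// persisted concurrently with further writes.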
func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
	// Create a new snapshot
	snap, err := n.state.Snapshot()
	if err != nil {
		return nil, err
	}

	ns := &nomadSnapshot{
		snap:      snap,
		timetable: n.timetable,
	}
	return ns, nil
}

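// Restore replaces the FSM's state with the contents of a snapshot, decoding
// each typed record and replaying it into a fresh state store.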
func (n *nomadFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	newState, err := state.NewStateStore(n.logOutput)
	if err != nil {
		return err
	}
	n.state = newState

	// Start the state restore
	restore, err := newState.Restore()
	if err != nil {
		return err
	}
	defer restore.Abort()

	// Create a decoder
	dec := codec.NewDecoder(old, structs.MsgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the message type
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode
		switch SnapshotType(msgType[0]) {
		case TimeTableSnapshot:
			if err := n.timetable.Deserialize(dec); err != nil {
				return fmt.Errorf("time table deserialize failed: %v", err)
			}

		case NodeSnapshot:
			node := new(structs.Node)
			if err := dec.Decode(node); err != nil {
				return err
			}
			if err := restore.NodeRestore(node); err != nil {
				return err
			}

		case JobSnapshot:
			job := new(structs.Job)
			if err := dec.Decode(job); err != nil {
				return err
			}
			if err := restore.JobRestore(job); err != nil {
				return err
			}

		case EvalSnapshot:
			eval := new(structs.Evaluation)
			if err := dec.Decode(eval); err != nil {
				return err
			}
			if err := restore.EvalRestore(eval); err != nil {
				return err
			}

		case AllocSnapshot:
			alloc := new(structs.Allocation)
			if err := dec.Decode(alloc); err != nil {
				return err
			}
			if err := restore.AllocRestore(alloc); err != nil {
				return err
			}

		case IndexSnapshot:
			idx := new(state.IndexEntry)
			if err := dec.Decode(idx); err != nil {
				return err
			}
			if err := restore.IndexRestore(idx); err != nil {
				return err
			}

		case PeriodicLaunchSnapshot:
			launch := new(structs.PeriodicLaunch)
			if err := dec.Decode(launch); err != nil {
				return err
			}
			if err := restore.PeriodicLaunchRestore(launch); err != nil {
				return err
			}

		case JobSummarySnapshot:
			summary := new(structs.JobSummary)
			if err := dec.Decode(summary); err != nil {
				return err
			}
			if err := restore.JobSummaryRestore(summary); err != nil {
				return err
			}

		default:
			return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
		}
	}

	// Commit the state restore
	restore.Commit()
	return nil
}

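// Persist writes the snapshot to the given sink as a header followed by a
// stream of typed, msgpack-encoded records.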
func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "persist"}, time.Now())
	// Create an encoder for writing to the sink
	encoder := codec.NewEncoder(sink, structs.MsgpackHandle)

	// Write the header
	header := snapshotHeader{}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	// Write the time table
	sink.Write([]byte{byte(TimeTableSnapshot)})
	if err := s.timetable.Serialize(encoder); err != nil {
		sink.Cancel()
		return err
	}

	// Write all the data out
	if err := s.persistIndexes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistNodes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistJobs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistEvals(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistAllocs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistPeriodicLaunches(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistJobSummaries(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	return nil
}

func (s *nomadSnapshot) persistIndexes(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the indexes
	iter, err := s.snap.Indexes()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := iter.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		idx := raw.(*state.IndexEntry)

		// Write out an index entry
		sink.Write([]byte{byte(IndexSnapshot)})
		if err := encoder.Encode(idx); err != nil {
			return err
		}
	}
	return nil
}

func (s *nomadSnapshot) persistNodes(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the nodes
	nodes, err := s.snap.Nodes()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := nodes.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		node := raw.(*structs.Node)

		// Write out a node registration
		sink.Write([]byte{byte(NodeSnapshot)})
		if err := encoder.Encode(node); err != nil {
			return err
		}
	}
	return nil
}

func (s *nomadSnapshot) persistJobs(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the jobs
	jobs, err := s.snap.Jobs()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := jobs.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		job := raw.(*structs.Job)

		// Write out a job registration
		sink.Write([]byte{byte(JobSnapshot)})
		if err := encoder.Encode(job); err != nil {
			return err
		}
	}
	return nil
}

func (s *nomadSnapshot) persistEvals(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the evaluations
	evals, err := s.snap.Evals()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := evals.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		eval := raw.(*structs.Evaluation)

		// Write out the evaluation
		sink.Write([]byte{byte(EvalSnapshot)})
		if err := encoder.Encode(eval); err != nil {
			return err
		}
	}
	return nil
}

func (s *nomadSnapshot) persistAllocs(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the allocations
	allocs, err := s.snap.Allocs()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := allocs.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		alloc := raw.(*structs.Allocation)

		// Write out the allocation
		sink.Write([]byte{byte(AllocSnapshot)})
		if err := encoder.Encode(alloc); err != nil {
			return err
		}
	}
	return nil
}

func (s *nomadSnapshot) persistPeriodicLaunches(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the periodic launches
	launches, err := s.snap.PeriodicLaunches()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := launches.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		launch := raw.(*structs.PeriodicLaunch)

		// Write out a periodic launch
		sink.Write([]byte{byte(PeriodicLaunchSnapshot)})
		if err := encoder.Encode(launch); err != nil {
			return err
		}
	}
	return nil
}

func (s *nomadSnapshot) persistJobSummaries(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {

	summaries, err := s.snap.JobSummaries()
	if err != nil {
		return err
	}

	for {
		raw := summaries.Next()
		if raw == nil {
			break
		}

		jobSummary := raw.(*structs.JobSummary)

		sink.Write([]byte{byte(JobSummarySnapshot)})
		if err := encoder.Encode(jobSummary); err != nil {
			return err
		}
	}
	return nil
}

// Release is a no-op, as we just need to GC the pointer
// to the state store snapshot. There is nothing to explicitly
// cleanup.
func (s *nomadSnapshot) Release() {}