package nomad

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/armon/go-metrics"
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/go-version"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/serf/serf"
	"golang.org/x/time/rate"
)

const (
	// failedEvalUnblockInterval is the interval at which failed evaluations are
	// unblocked to re-enter the scheduler. A failed evaluation occurs under
	// high contention when the scheduler's plan does not make progress.
	failedEvalUnblockInterval = 1 * time.Minute

	// replicationRateLimit is used to rate limit how often data is replicated
	// between the authoritative region and the local region.
	replicationRateLimit rate.Limit = 10.0

	// barrierWriteTimeout is used to give Raft a chance to process a
	// possible loss of leadership event if we are unable to get a barrier
	// while leader.
	barrierWriteTimeout = 2 * time.Minute
)

// minAutopilotVersion is the minimum server version required for Autopilot.
var minAutopilotVersion = version.Must(version.NewVersion("0.8.0"))

// monitorLeadership is used to monitor if we acquire or lose our role
// as the leader in the Raft cluster. There is some work the leader is
// expected to do, so we must react to changes.
func (s *Server) monitorLeadership() {
	var weAreLeaderCh chan struct{}
	var leaderLoop sync.WaitGroup
	for {
		select {
		case isLeader := <-s.leaderCh:
			switch {
			case isLeader:
				if weAreLeaderCh != nil {
					s.logger.Printf("[ERR] nomad: attempted to start the leader loop while running")
					continue
				}

				weAreLeaderCh = make(chan struct{})
				leaderLoop.Add(1)
				go func(ch chan struct{}) {
					defer leaderLoop.Done()
					s.leaderLoop(ch)
				}(weAreLeaderCh)
				s.logger.Printf("[INFO] nomad: cluster leadership acquired")

			default:
				if weAreLeaderCh == nil {
					s.logger.Printf("[ERR] nomad: attempted to stop the leader loop while not running")
					continue
				}

				s.logger.Printf("[DEBUG] nomad: shutting down leader loop")
				close(weAreLeaderCh)
				leaderLoop.Wait()
				weAreLeaderCh = nil
				s.logger.Printf("[INFO] nomad: cluster leadership lost")
			}

		case <-s.shutdownCh:
			return
		}
	}
}

// leaderLoop runs as long as we are the leader to run various
// maintenance activities.
func (s *Server) leaderLoop(stopCh chan struct{}) {
	var reconcileCh chan serf.Member
	establishedLeader := false

RECONCILE:
	// Setup a reconciliation timer
	reconcileCh = nil
	interval := time.After(s.config.ReconcileInterval)

	// Apply a raft barrier to ensure our FSM is caught up
	start := time.Now()
	barrier := s.raft.Barrier(barrierWriteTimeout)
	if err := barrier.Error(); err != nil {
		s.logger.Printf("[ERR] nomad: failed to wait for barrier: %v", err)
		goto WAIT
	}
	metrics.MeasureSince([]string{"nomad", "leader", "barrier"}, start)

	// Check if we need to handle initial leadership actions
	if !establishedLeader {
		if err := s.establishLeadership(stopCh); err != nil {
			s.logger.Printf("[ERR] nomad: failed to establish leadership: %v", err)

			// Immediately revoke leadership since we didn't successfully
			// establish leadership.
			if err := s.revokeLeadership(); err != nil {
				s.logger.Printf("[ERR] nomad: failed to revoke leadership: %v", err)
			}

			goto WAIT
		}

		establishedLeader = true
		defer func() {
			if err := s.revokeLeadership(); err != nil {
				s.logger.Printf("[ERR] nomad: failed to revoke leadership: %v", err)
			}
		}()
	}

	// Reconcile any missing data
	if err := s.reconcile(); err != nil {
		s.logger.Printf("[ERR] nomad: failed to reconcile: %v", err)
		goto WAIT
	}

	// Initial reconcile worked, now we can process the channel
	// updates
	reconcileCh = s.reconcileCh

	// Poll the stop channel to give it priority so we don't waste time
	// trying to perform the other operations if we have been asked to shut
	// down.
	select {
	case <-stopCh:
		return
	default:
	}

WAIT:
	// Wait until leadership is lost
	for {
		select {
		case <-stopCh:
			return
		case <-s.shutdownCh:
			return
		case <-interval:
			goto RECONCILE
		case member := <-reconcileCh:
			s.reconcileMember(member)
		}
	}
}

// establishLeadership is invoked once we become leader and are able
// to invoke an initial barrier. The barrier is used to ensure any
// previously inflight transactions have been committed and that our
// state is up-to-date.
func (s *Server) establishLeadership(stopCh chan struct{}) error {
	defer metrics.MeasureSince([]string{"nomad", "leader", "establish_leadership"}, time.Now())

	// Generate a leader ACL token. This will allow the leader to issue work
	// that requires a valid ACL token.
	s.setLeaderAcl(uuid.Generate())

	// Disable workers to free half the cores for use in the plan queue and
	// evaluation broker
	if numWorkers := len(s.workers); numWorkers > 1 {
		// Disabling 3/4 of the workers frees CPU for raft and the
		// plan applier which uses 1/2 the cores.
		for i := 0; i < (3 * numWorkers / 4); i++ {
			s.workers[i].SetPause(true)
		}
	}

	// Initialize and start the autopilot routine
	s.getOrCreateAutopilotConfig()
	s.autopilot.Start()

	// Enable the plan queue, since we are now the leader
	s.planQueue.SetEnabled(true)

	// Start the plan evaluator
	go s.planApply()

	// Enable the eval broker, since we are now the leader
	s.evalBroker.SetEnabled(true)

	// Enable the blocked eval tracker, since we are now the leader
	s.blockedEvals.SetEnabled(true)
	s.blockedEvals.SetTimetable(s.fsm.TimeTable())

	// Enable the deployment watcher, since we are now the leader
	s.deploymentWatcher.SetEnabled(true, s.State())

	// Enable the NodeDrainer
	s.nodeDrainer.SetEnabled(true, s.State())

	// Restore the eval broker state
	if err := s.restoreEvals(); err != nil {
		return err
	}

	// Activate the vault client
	s.vault.SetActive(true)
	if err := s.restoreRevokingAccessors(); err != nil {
		return err
	}

	// Enable the periodic dispatcher, since we are now the leader.
	s.periodicDispatcher.SetEnabled(true)

	// Restore the periodic dispatcher state
	if err := s.restorePeriodicDispatcher(); err != nil {
		return err
	}

	// Schedule periodic jobs
	go s.schedulePeriodic(stopCh)

	// Reap any failed evaluations
	go s.reapFailedEvaluations(stopCh)

	// Reap any duplicate blocked evaluations
	go s.reapDupBlockedEvaluations(stopCh)

	// Periodically unblock failed allocations
	go s.periodicUnblockFailedEvals(stopCh)

	// Periodically publish job summary metrics
	go s.publishJobSummaryMetrics(stopCh)

	// Setup the heartbeat timers. This is done both when starting up and when
	// a leader failover happens. Since the timers are maintained by the leader
	// node, effectively this means all the timers are renewed at the time of failover.
	// The TTL contract is that the session will not be expired before the TTL,
	// so expiring it later is allowable.
	//
	// This MUST be done after the initial barrier to ensure the latest Nodes
	// are available to be initialized. Otherwise initialization may use stale
	// data.
	if err := s.initializeHeartbeatTimers(); err != nil {
		s.logger.Printf("[ERR] nomad: heartbeat timer setup failed: %v", err)
		return err
	}

	// COMPAT 0.4 - 0.4.1
	// Reconcile the summaries of the registered jobs. We only reconcile
	// summaries if the server is 0.4.1: summaries are not present in 0.4,
	// so after upgrading to 0.4.1 they might not be correct.
	if err := s.reconcileJobSummaries(); err != nil {
		return fmt.Errorf("unable to reconcile job summaries: %v", err)
	}

	// Start replication of ACLs and Policies if they are enabled,
	// and we are not the authoritative region.
	if s.config.ACLEnabled && s.config.Region != s.config.AuthoritativeRegion {
		go s.replicateACLPolicies(stopCh)
		go s.replicateACLTokens(stopCh)
	}

	// Setup any enterprise systems required.
	if err := s.establishEnterpriseLeadership(stopCh); err != nil {
		return err
	}

	return nil
}

// restoreEvals is used to restore pending evaluations into the eval broker and
// blocked evaluations into the blocked eval tracker. The broker and blocked
// eval tracker are maintained only by the leader, so they must be restored anytime
// a leadership transition takes place.
func (s *Server) restoreEvals() error {
	// Get an iterator over every evaluation
	ws := memdb.NewWatchSet()
	iter, err := s.fsm.State().Evals(ws)
	if err != nil {
		return fmt.Errorf("failed to get evaluations: %v", err)
	}

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		eval := raw.(*structs.Evaluation)

		if eval.ShouldEnqueue() {
			s.evalBroker.Enqueue(eval)
		} else if eval.ShouldBlock() {
			s.blockedEvals.Block(eval)
		}
	}
	return nil
}

// restoreRevokingAccessors is used to restore Vault accessors that should be
// revoked.
func (s *Server) restoreRevokingAccessors() error {
	// An accessor should be revoked if its allocation or node is terminal
	ws := memdb.NewWatchSet()
	state := s.fsm.State()
	iter, err := state.VaultAccessors(ws)
	if err != nil {
		return fmt.Errorf("failed to get vault accessors: %v", err)
	}

	var revoke []*structs.VaultAccessor
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		va := raw.(*structs.VaultAccessor)

		// Check the allocation
		alloc, err := state.AllocByID(ws, va.AllocID)
		if err != nil {
			return fmt.Errorf("failed to lookup allocation %q: %v", va.AllocID, err)
		}
		if alloc == nil || alloc.Terminated() {
			// No longer running and should be revoked
			revoke = append(revoke, va)
			continue
		}

		// Check the node
		node, err := state.NodeByID(ws, va.NodeID)
		if err != nil {
			return fmt.Errorf("failed to lookup node %q: %v", va.NodeID, err)
		}
		if node == nil || node.TerminalStatus() {
			// Node is terminal so any accessor from it should be revoked
			revoke = append(revoke, va)
			continue
		}
	}

	if len(revoke) != 0 {
		if err := s.vault.RevokeTokens(context.Background(), revoke, true); err != nil {
			return fmt.Errorf("failed to revoke tokens: %v", err)
		}
	}

	return nil
}

// restorePeriodicDispatcher is used to restore all periodic jobs into the
// periodic dispatcher. It also determines if a periodic job should have been
// created during the leadership transition and force runs them. The periodic
// dispatcher is maintained only by the leader, so it must be restored anytime a
// leadership transition takes place.
func (s *Server) restorePeriodicDispatcher() error {
	ws := memdb.NewWatchSet()
	iter, err := s.fsm.State().JobsByPeriodic(ws, true)
	if err != nil {
		return fmt.Errorf("failed to get periodic jobs: %v", err)
	}

	now := time.Now()
	for i := iter.Next(); i != nil; i = iter.Next() {
		job := i.(*structs.Job)

		// We skip adding parameterized jobs because they themselves aren't
		// tracked, only the dispatched children are.
		if job.IsParameterized() {
			continue
		}

		if err := s.periodicDispatcher.Add(job); err != nil {
			s.logger.Printf("[ERR] nomad.periodic: %v", err)
			continue
		}

		// We do not need to force run the job since it isn't active.
		if !job.IsPeriodicActive() {
			continue
		}

		// If the periodic job has never been launched before, launch will hold
		// the time the periodic job was added. Otherwise it has the last launch
		// time of the periodic job.
		launch, err := s.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
		if err != nil {
			return fmt.Errorf("failed to get periodic launch time: %v", err)
		}
		if launch == nil {
			return fmt.Errorf("no recorded periodic launch time for job %q in namespace %q",
				job.ID, job.Namespace)
		}

		// nextLaunch is the next launch that should occur.
		nextLaunch, err := job.Periodic.Next(launch.Launch.In(job.Periodic.GetLocation()))
		if err != nil {
			s.logger.Printf("[ERR] nomad.periodic: failed to determine next periodic launch for job %s: %v", job.NamespacedID(), err)
			continue
		}

		// We skip force launching the job if there should be no next launch
		// (the zero case) or if the next launch time is in the future. If it is
		// in the future, it will be handled by the periodic dispatcher.
		if nextLaunch.IsZero() || !nextLaunch.Before(now) {
			continue
		}

		if _, err := s.periodicDispatcher.ForceRun(job.Namespace, job.ID); err != nil {
			msg := fmt.Sprintf("force run of periodic job %q failed: %v", job.ID, err)
			s.logger.Printf("[ERR] nomad.periodic: %s", msg)
			return errors.New(msg)
		}
		s.logger.Printf("[DEBUG] nomad.periodic: periodic job %q force"+
			" run during leadership establishment", job.ID)
	}

	return nil
}

// schedulePeriodic is used to do periodic job dispatch while we are leader
func (s *Server) schedulePeriodic(stopCh chan struct{}) {
	evalGC := time.NewTicker(s.config.EvalGCInterval)
	defer evalGC.Stop()
	nodeGC := time.NewTicker(s.config.NodeGCInterval)
	defer nodeGC.Stop()
	jobGC := time.NewTicker(s.config.JobGCInterval)
	defer jobGC.Stop()
	deploymentGC := time.NewTicker(s.config.DeploymentGCInterval)
	defer deploymentGC.Stop()

	// getLatest grabs the latest index from the state store. It returns true if
	// the index was retrieved successfully.
	getLatest := func() (uint64, bool) {
		snapshotIndex, err := s.fsm.State().LatestIndex()
		if err != nil {
			s.logger.Printf("[ERR] nomad: failed to determine state store's index: %v", err)
			return 0, false
		}

		return snapshotIndex, true
	}

	for {
		select {
		case <-evalGC.C:
			if index, ok := getLatest(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobEvalGC, index))
			}
		case <-nodeGC.C:
			if index, ok := getLatest(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobNodeGC, index))
			}
		case <-jobGC.C:
			if index, ok := getLatest(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobJobGC, index))
			}
		case <-deploymentGC.C:
			if index, ok := getLatest(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobDeploymentGC, index))
			}
		case <-stopCh:
			return
		}
	}
}

// coreJobEval returns an evaluation for a core job
func (s *Server) coreJobEval(job string, modifyIndex uint64) *structs.Evaluation {
	return &structs.Evaluation{
		ID:          uuid.Generate(),
		Namespace:   "-",
		Priority:    structs.CoreJobPriority,
		Type:        structs.JobTypeCore,
		TriggeredBy: structs.EvalTriggerScheduled,
		JobID:       job,
		LeaderACL:   s.getLeaderAcl(),
		Status:      structs.EvalStatusPending,
		ModifyIndex: modifyIndex,
	}
}
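
// Illustrative usage (mirroring schedulePeriodic above): core job evaluations
// are enqueued directly on the eval broker with the latest state store index,
// for example:
//
//	if index, ok := getLatest(); ok {
//		s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobEvalGC, index))
//	}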

// reapFailedEvaluations is used to reap evaluations that
// have reached their delivery limit and should be failed
func (s *Server) reapFailedEvaluations(stopCh chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
			// Scan for a failed evaluation
			eval, token, err := s.evalBroker.Dequeue([]string{failedQueue}, time.Second)
			if err != nil {
				return
			}
			if eval == nil {
				continue
			}

			// Update the status to failed
			updateEval := eval.Copy()
			updateEval.Status = structs.EvalStatusFailed
			updateEval.StatusDescription = fmt.Sprintf("evaluation reached delivery limit (%d)", s.config.EvalDeliveryLimit)
			s.logger.Printf("[WARN] nomad: eval %#v reached delivery limit, marking as failed", updateEval)

			// Create a follow-up evaluation that will be used to retry the
			// scheduling for the job after the cluster is hopefully more stable
			// due to the fairly large backoff.
			followupEvalWait := s.config.EvalFailedFollowupBaselineDelay +
				time.Duration(rand.Int63n(int64(s.config.EvalFailedFollowupDelayRange)))
			followupEval := eval.CreateFailedFollowUpEval(followupEvalWait)

			// Update via Raft
			req := structs.EvalUpdateRequest{
				Evals: []*structs.Evaluation{updateEval, followupEval},
			}
			if _, _, err := s.raftApply(structs.EvalUpdateRequestType, &req); err != nil {
				s.logger.Printf("[ERR] nomad: failed to update failed eval %#v and create a follow-up: %v", updateEval, err)
				continue
			}

			// Ack completion
			s.evalBroker.Ack(eval.ID, token)
		}
	}
}
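
// Note on reapFailedEvaluations: the follow-up evaluation's wait is drawn
// uniformly from the window
//
//	[EvalFailedFollowupBaselineDelay, EvalFailedFollowupBaselineDelay+EvalFailedFollowupDelayRange)
//
// so retries for failed evaluations are spread out rather than all landing at
// the same time.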

// reapDupBlockedEvaluations is used to reap duplicate blocked evaluations;
// the duplicates are cancelled.
func (s *Server) reapDupBlockedEvaluations(stopCh chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
			// Scan for duplicate blocked evals.
			dups := s.blockedEvals.GetDuplicates(time.Second)
			if dups == nil {
				continue
			}

			cancel := make([]*structs.Evaluation, len(dups))
			for i, dup := range dups {
				// Update the status to cancelled
				newEval := dup.Copy()
				newEval.Status = structs.EvalStatusCancelled
				newEval.StatusDescription = fmt.Sprintf("existing blocked evaluation exists for job %q", newEval.JobID)
				cancel[i] = newEval
			}

			// Update via Raft
			req := structs.EvalUpdateRequest{
				Evals: cancel,
			}
			if _, _, err := s.raftApply(structs.EvalUpdateRequestType, &req); err != nil {
				s.logger.Printf("[ERR] nomad: failed to update duplicate evals %#v: %v", cancel, err)
				continue
			}
		}
	}
}

// periodicUnblockFailedEvals periodically unblocks failed, blocked evaluations.
func (s *Server) periodicUnblockFailedEvals(stopCh chan struct{}) {
	ticker := time.NewTicker(failedEvalUnblockInterval)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			// Unblock the failed allocations
			s.blockedEvals.UnblockFailed()
		}
	}
}

// publishJobSummaryMetrics publishes the job summaries as metrics
func (s *Server) publishJobSummaryMetrics(stopCh chan struct{}) {
	timer := time.NewTimer(0)
	defer timer.Stop()

	for {
		select {
		case <-stopCh:
			return
		case <-timer.C:
			timer.Reset(s.config.StatsCollectionInterval)
			state, err := s.State().Snapshot()
			if err != nil {
				s.logger.Printf("[ERR] nomad: failed to get state: %v", err)
				continue
			}
			ws := memdb.NewWatchSet()
			iter, err := state.JobSummaries(ws)
			if err != nil {
				s.logger.Printf("[ERR] nomad: failed to get job summaries: %v", err)
				continue
			}

			for {
				raw := iter.Next()
				if raw == nil {
					break
				}
				summary := raw.(*structs.JobSummary)
				for name, tgSummary := range summary.Summary {
					if !s.config.DisableTaggedMetrics {
						labels := []metrics.Label{
							{
								Name:  "job",
								Value: summary.JobID,
							},
							{
								Name:  "task_group",
								Value: name,
							},
						}

						if strings.Contains(summary.JobID, "/dispatch-") {
							jobInfo := strings.Split(summary.JobID, "/dispatch-")
							labels = append(labels, metrics.Label{
								Name:  "parent_id",
								Value: jobInfo[0],
							}, metrics.Label{
								Name:  "dispatch_id",
								Value: jobInfo[1],
							})
						}
						if strings.Contains(summary.JobID, "/periodic-") {
							jobInfo := strings.Split(summary.JobID, "/periodic-")
							labels = append(labels, metrics.Label{
								Name:  "parent_id",
								Value: jobInfo[0],
							}, metrics.Label{
								Name:  "periodic_id",
								Value: jobInfo[1],
							})
						}

						metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "queued"},
							float32(tgSummary.Queued), labels)
						metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "complete"},
							float32(tgSummary.Complete), labels)
						metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "failed"},
							float32(tgSummary.Failed), labels)
						metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "running"},
							float32(tgSummary.Running), labels)
						metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "starting"},
							float32(tgSummary.Starting), labels)
						metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "lost"},
							float32(tgSummary.Lost), labels)
					}
					if s.config.BackwardsCompatibleMetrics {
						metrics.SetGauge([]string{"nomad", "job_summary", summary.JobID, name, "queued"}, float32(tgSummary.Queued))
						metrics.SetGauge([]string{"nomad", "job_summary", summary.JobID, name, "complete"}, float32(tgSummary.Complete))
						metrics.SetGauge([]string{"nomad", "job_summary", summary.JobID, name, "failed"}, float32(tgSummary.Failed))
						metrics.SetGauge([]string{"nomad", "job_summary", summary.JobID, name, "running"}, float32(tgSummary.Running))
						metrics.SetGauge([]string{"nomad", "job_summary", summary.JobID, name, "starting"}, float32(tgSummary.Starting))
						metrics.SetGauge([]string{"nomad", "job_summary", summary.JobID, name, "lost"}, float32(tgSummary.Lost))
					}
				}
			}
		}
	}
}
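
// Example of the series emitted above (illustrative, assuming a job "example"
// with task group "cache"):
//
//	tagged:                nomad.job_summary.running{job="example", task_group="cache"}
//	backwards-compatible:  nomad.job_summary.example.cache.running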

// revokeLeadership is invoked once we step down as leader.
// This is used to cleanup any state that may be specific to a leader.
func (s *Server) revokeLeadership() error {
	defer metrics.MeasureSince([]string{"nomad", "leader", "revoke_leadership"}, time.Now())

	// Clear the leader token since we are no longer the leader.
	s.setLeaderAcl("")

	// Disable autopilot
	s.autopilot.Stop()

	// Disable the plan queue, since we are no longer leader
	s.planQueue.SetEnabled(false)

	// Disable the eval broker, since it is only useful as a leader
	s.evalBroker.SetEnabled(false)

	// Disable the blocked eval tracker, since it is only useful as a leader
	s.blockedEvals.SetEnabled(false)

	// Disable the periodic dispatcher, since it is only useful as a leader
	s.periodicDispatcher.SetEnabled(false)

	// Disable the Vault client as it is only useful as a leader.
	s.vault.SetActive(false)

	// Disable the deployment watcher as it is only useful as a leader.
	s.deploymentWatcher.SetEnabled(false, nil)

	// Disable the node drainer
	s.nodeDrainer.SetEnabled(false, nil)

	// Disable any enterprise systems required.
	if err := s.revokeEnterpriseLeadership(); err != nil {
		return err
	}

	// Clear the heartbeat timers on either shutdown or step down,
	// since we are no longer responsible for TTL expirations.
	if err := s.clearAllHeartbeatTimers(); err != nil {
		s.logger.Printf("[ERR] nomad: clearing heartbeat timers failed: %v", err)
		return err
	}

	// Unpause our workers if we paused them previously. This mirrors the
	// pause loop in establishLeadership, which pauses 3/4 of the workers.
	if numWorkers := len(s.workers); numWorkers > 1 {
		for i := 0; i < (3 * numWorkers / 4); i++ {
			s.workers[i].SetPause(false)
		}
	}
	return nil
}

// reconcile is used to reconcile the differences between Serf
// membership and what is reflected in our strongly consistent store.
func (s *Server) reconcile() error {
	defer metrics.MeasureSince([]string{"nomad", "leader", "reconcile"}, time.Now())
	members := s.serf.Members()
	for _, member := range members {
		if err := s.reconcileMember(member); err != nil {
			return err
		}
	}
	return nil
}

// reconcileMember is used to do an async reconcile of a single serf member
func (s *Server) reconcileMember(member serf.Member) error {
	// Check if this is a member we should handle
	valid, parts := isNomadServer(member)
	if !valid || parts.Region != s.config.Region {
		return nil
	}
	defer metrics.MeasureSince([]string{"nomad", "leader", "reconcileMember"}, time.Now())

	var err error
	switch member.Status {
	case serf.StatusAlive:
		err = s.addRaftPeer(member, parts)
	case serf.StatusLeft, StatusReap:
		err = s.removeRaftPeer(member, parts)
	}
	if err != nil {
		s.logger.Printf("[ERR] nomad: failed to reconcile member: %v: %v",
			member, err)
		return err
	}
	return nil
}

// reconcileJobSummaries reconciles the summaries of all the jobs registered in
// the system
// COMPAT 0.4 -> 0.4.1
func (s *Server) reconcileJobSummaries() error {
	index, err := s.fsm.state.LatestIndex()
	if err != nil {
		return fmt.Errorf("unable to read latest index: %v", err)
	}
	s.logger.Printf("[DEBUG] leader: reconciling job summaries at index: %v", index)

	args := &structs.GenericResponse{}
	msg := structs.ReconcileJobSummariesRequestType | structs.IgnoreUnknownTypeFlag
	if _, _, err = s.raftApply(msg, args); err != nil {
		return fmt.Errorf("reconciliation of job summaries failed: %v", err)
	}

	return nil
}

// addRaftPeer is used to add a new Raft peer when a Nomad server joins
func (s *Server) addRaftPeer(m serf.Member, parts *serverParts) error {
	// Check for possibility of multiple bootstrap nodes
	members := s.serf.Members()
	if parts.Bootstrap {
		for _, member := range members {
			valid, p := isNomadServer(member)
			if valid && member.Name != m.Name && p.Bootstrap {
				s.logger.Printf("[ERR] nomad: '%v' and '%v' are both in bootstrap mode. Only one node should be in bootstrap mode, not adding Raft peer.", m.Name, member.Name)
				return nil
			}
		}
	}

	// Processing ourselves could result in trying to remove ourselves to
	// fix up our address, which would make us step down. This is only
	// safe to attempt if there are multiple servers available.
	addr := (&net.TCPAddr{IP: m.Addr, Port: parts.Port}).String()
	configFuture := s.raft.GetConfiguration()
	if err := configFuture.Error(); err != nil {
		s.logger.Printf("[ERR] nomad: failed to get raft configuration: %v", err)
		return err
	}

	if m.Name == s.config.NodeName {
		if l := len(configFuture.Configuration().Servers); l < 3 {
			s.logger.Printf("[DEBUG] nomad: skipping self join check for %q since the cluster is too small", m.Name)
			return nil
		}
	}

	// See if it's already in the configuration. It's harmless to re-add it
	// but we want to avoid doing that if possible to prevent useless Raft
	// log entries. If the address is the same but the ID changed, remove the
	// old server before adding the new one.
	minRaftProtocol, err := s.autopilot.MinRaftProtocol()
	if err != nil {
		return err
	}
	for _, server := range configFuture.Configuration().Servers {
		// No-op if the raft version is too low
		if server.Address == raft.ServerAddress(addr) && (minRaftProtocol < 2 || parts.RaftVersion < 3) {
			return nil
		}

		// If the address or ID matches an existing server, see if we need to remove the old one first
		if server.Address == raft.ServerAddress(addr) || server.ID == raft.ServerID(parts.ID) {
			// Exit with no-op if this is being called on an existing server and both the ID and address match
			if server.Address == raft.ServerAddress(addr) && server.ID == raft.ServerID(parts.ID) {
				return nil
			}
			future := s.raft.RemoveServer(server.ID, 0, 0)
			if server.Address == raft.ServerAddress(addr) {
				if err := future.Error(); err != nil {
					return fmt.Errorf("error removing server with duplicate address %q: %s", server.Address, err)
				}
				s.logger.Printf("[INFO] nomad: removed server with duplicate address: %s", server.Address)
			} else {
				if err := future.Error(); err != nil {
					return fmt.Errorf("error removing server with duplicate ID %q: %s", server.ID, err)
				}
				s.logger.Printf("[INFO] nomad: removed server with duplicate ID: %s", server.ID)
			}
		}
	}

	// Attempt to add as a peer
	switch {
	case minRaftProtocol >= 3:
		addFuture := s.raft.AddNonvoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
		if err := addFuture.Error(); err != nil {
			s.logger.Printf("[ERR] nomad: failed to add raft peer: %v", err)
			return err
		}
	case minRaftProtocol == 2 && parts.RaftVersion >= 3:
		addFuture := s.raft.AddVoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
		if err := addFuture.Error(); err != nil {
			s.logger.Printf("[ERR] nomad: failed to add raft peer: %v", err)
			return err
		}
	default:
		addFuture := s.raft.AddPeer(raft.ServerAddress(addr))
		if err := addFuture.Error(); err != nil {
			s.logger.Printf("[ERR] nomad: failed to add raft peer: %v", err)
			return err
		}
	}

	return nil
}
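
// Summary of the peer-addition switch above (a restatement, not additional
// behavior):
//
//	minRaftProtocol >= 3                           -> AddNonvoter (Autopilot is expected to handle promotion)
//	minRaftProtocol == 2 && parts.RaftVersion >= 3 -> AddVoter
//	otherwise                                      -> AddPeer (legacy API)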

// removeRaftPeer is used to remove a Raft peer when a Nomad server leaves
// or is reaped
func (s *Server) removeRaftPeer(m serf.Member, parts *serverParts) error {
	addr := (&net.TCPAddr{IP: m.Addr, Port: parts.Port}).String()

	// See if it's already in the configuration. It's harmless to re-remove it
	// but we want to avoid doing that if possible to prevent useless Raft
	// log entries.
	configFuture := s.raft.GetConfiguration()
	if err := configFuture.Error(); err != nil {
		s.logger.Printf("[ERR] nomad: failed to get raft configuration: %v", err)
		return err
	}

	minRaftProtocol, err := s.autopilot.MinRaftProtocol()
	if err != nil {
		return err
	}

	// Pick which remove API to use based on how the server was added.
	for _, server := range configFuture.Configuration().Servers {
		// If we understand the new add/remove APIs and the server was added by ID, use the new remove API
		if minRaftProtocol >= 2 && server.ID == raft.ServerID(parts.ID) {
			s.logger.Printf("[INFO] nomad: removing server by ID: %q", server.ID)
			future := s.raft.RemoveServer(raft.ServerID(parts.ID), 0, 0)
			if err := future.Error(); err != nil {
				s.logger.Printf("[ERR] nomad: failed to remove raft peer '%v': %v",
					server.ID, err)
				return err
			}
			break
		} else if server.Address == raft.ServerAddress(addr) {
			// If not, use the old remove API
			s.logger.Printf("[INFO] nomad: removing server by address: %q", server.Address)
			future := s.raft.RemovePeer(raft.ServerAddress(addr))
			if err := future.Error(); err != nil {
				s.logger.Printf("[ERR] nomad: failed to remove raft peer '%v': %v",
					addr, err)
				return err
			}
			break
		}
	}

	return nil
}

// replicateACLPolicies is used to replicate ACL policies from
// the authoritative region to this region.
func (s *Server) replicateACLPolicies(stopCh chan struct{}) {
	req := structs.ACLPolicyListRequest{
		QueryOptions: structs.QueryOptions{
			Region:     s.config.AuthoritativeRegion,
			AllowStale: true,
		},
	}
	limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
	s.logger.Printf("[DEBUG] nomad: starting ACL policy replication from authoritative region %q", req.Region)

START:
	for {
		select {
		case <-stopCh:
			return
		default:
			// Rate limit how often we attempt replication
			limiter.Wait(context.Background())

			// Fetch the list of policies
			var resp structs.ACLPolicyListResponse
			req.AuthToken = s.ReplicationToken()
			err := s.forwardRegion(s.config.AuthoritativeRegion,
				"ACL.ListPolicies", &req, &resp)
			if err != nil {
				s.logger.Printf("[ERR] nomad: failed to fetch policies from authoritative region: %v", err)
				goto ERR_WAIT
			}

			// Perform a two-way diff
			delete, update := diffACLPolicies(s.State(), req.MinQueryIndex, resp.Policies)

			// Delete policies that should not exist
			if len(delete) > 0 {
				args := &structs.ACLPolicyDeleteRequest{
					Names: delete,
				}
				_, _, err := s.raftApply(structs.ACLPolicyDeleteRequestType, args)
				if err != nil {
					s.logger.Printf("[ERR] nomad: failed to delete policies: %v", err)
					goto ERR_WAIT
				}
			}

			// Fetch any outdated policies
			var fetched []*structs.ACLPolicy
			if len(update) > 0 {
				req := structs.ACLPolicySetRequest{
					Names: update,
					QueryOptions: structs.QueryOptions{
						Region:        s.config.AuthoritativeRegion,
						AuthToken:     s.ReplicationToken(),
						AllowStale:    true,
						MinQueryIndex: resp.Index - 1,
					},
				}
				var reply structs.ACLPolicySetResponse
				if err := s.forwardRegion(s.config.AuthoritativeRegion,
					"ACL.GetPolicies", &req, &reply); err != nil {
					s.logger.Printf("[ERR] nomad: failed to fetch policies from authoritative region: %v", err)
					goto ERR_WAIT
				}
				for _, policy := range reply.Policies {
					fetched = append(fetched, policy)
				}
			}

			// Update local policies
			if len(fetched) > 0 {
				args := &structs.ACLPolicyUpsertRequest{
					Policies: fetched,
				}
				_, _, err := s.raftApply(structs.ACLPolicyUpsertRequestType, args)
				if err != nil {
					s.logger.Printf("[ERR] nomad: failed to update policies: %v", err)
					goto ERR_WAIT
				}
			}

			// Update the minimum query index, blocks until there
			// is a change.
			req.MinQueryIndex = resp.Index
		}
	}

ERR_WAIT:
	select {
	case <-time.After(s.config.ReplicationBackoff):
		goto START
	case <-stopCh:
		return
	}
}

// diffACLPolicies is used to perform a two-way diff between the local
// policies and the remote policies to determine which policies need to
// be deleted or updated.
func diffACLPolicies(state *state.StateStore, minIndex uint64, remoteList []*structs.ACLPolicyListStub) (delete []string, update []string) {
	// Construct a set of the local and remote policies
	local := make(map[string][]byte)
	remote := make(map[string]struct{})

	// Add all the local policies
	iter, err := state.ACLPolicies(nil)
	if err != nil {
		panic("failed to iterate local policies")
	}
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		policy := raw.(*structs.ACLPolicy)
		local[policy.Name] = policy.Hash
	}

	// Iterate over the remote policies
	for _, rp := range remoteList {
		remote[rp.Name] = struct{}{}

		// Check if the policy is missing locally
		if localHash, ok := local[rp.Name]; !ok {
			update = append(update, rp.Name)

			// Check if the policy is newer remotely and there is a hash mismatch.
		} else if rp.ModifyIndex > minIndex && !bytes.Equal(localHash, rp.Hash) {
			update = append(update, rp.Name)
		}
	}

	// Check if policy should be deleted
	for lp := range local {
		if _, ok := remote[lp]; !ok {
			delete = append(delete, lp)
		}
	}
	return
}
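
// Illustrative example of the diff semantics above: if the local region holds
// policies {A, B} and the authoritative region returns {B, C}, then A is
// marked for deletion and C for update; B is only marked for update when its
// remote ModifyIndex exceeds minIndex and its hash differs from the local copy.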

// replicateACLTokens is used to replicate global ACL tokens from
// the authoritative region to this region.
func (s *Server) replicateACLTokens(stopCh chan struct{}) {
	req := structs.ACLTokenListRequest{
		GlobalOnly: true,
		QueryOptions: structs.QueryOptions{
			Region:     s.config.AuthoritativeRegion,
			AllowStale: true,
		},
	}
	limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
	s.logger.Printf("[DEBUG] nomad: starting ACL token replication from authoritative region %q", req.Region)

START:
	for {
		select {
		case <-stopCh:
			return
		default:
			// Rate limit how often we attempt replication
			limiter.Wait(context.Background())

			// Fetch the list of tokens
			var resp structs.ACLTokenListResponse
			req.AuthToken = s.ReplicationToken()
			err := s.forwardRegion(s.config.AuthoritativeRegion,
				"ACL.ListTokens", &req, &resp)
			if err != nil {
				s.logger.Printf("[ERR] nomad: failed to fetch tokens from authoritative region: %v", err)
				goto ERR_WAIT
			}

			// Perform a two-way diff
			delete, update := diffACLTokens(s.State(), req.MinQueryIndex, resp.Tokens)

			// Delete tokens that should not exist
			if len(delete) > 0 {
				args := &structs.ACLTokenDeleteRequest{
					AccessorIDs: delete,
				}
				_, _, err := s.raftApply(structs.ACLTokenDeleteRequestType, args)
				if err != nil {
					s.logger.Printf("[ERR] nomad: failed to delete tokens: %v", err)
					goto ERR_WAIT
				}
			}

			// Fetch any outdated tokens.
			var fetched []*structs.ACLToken
			if len(update) > 0 {
				req := structs.ACLTokenSetRequest{
					AccessorIDS: update,
					QueryOptions: structs.QueryOptions{
						Region:        s.config.AuthoritativeRegion,
						AuthToken:     s.ReplicationToken(),
						AllowStale:    true,
						MinQueryIndex: resp.Index - 1,
					},
				}
				var reply structs.ACLTokenSetResponse
				if err := s.forwardRegion(s.config.AuthoritativeRegion,
					"ACL.GetTokens", &req, &reply); err != nil {
					s.logger.Printf("[ERR] nomad: failed to fetch tokens from authoritative region: %v", err)
					goto ERR_WAIT
				}
				for _, token := range reply.Tokens {
					fetched = append(fetched, token)
				}
			}

			// Update local tokens
			if len(fetched) > 0 {
				args := &structs.ACLTokenUpsertRequest{
					Tokens: fetched,
				}
				_, _, err := s.raftApply(structs.ACLTokenUpsertRequestType, args)
				if err != nil {
					s.logger.Printf("[ERR] nomad: failed to update tokens: %v", err)
					goto ERR_WAIT
				}
			}

			// Update the minimum query index, blocks until there
			// is a change.
			req.MinQueryIndex = resp.Index
		}
	}

ERR_WAIT:
	select {
	case <-time.After(s.config.ReplicationBackoff):
		goto START
	case <-stopCh:
		return
	}
}

// diffACLTokens is used to perform a two-way diff between the local
// tokens and the remote tokens to determine which tokens need to
// be deleted or updated.
func diffACLTokens(state *state.StateStore, minIndex uint64, remoteList []*structs.ACLTokenListStub) (delete []string, update []string) {
	// Construct a set of the local and remote tokens
	local := make(map[string][]byte)
	remote := make(map[string]struct{})

	// Add all the local global tokens
	iter, err := state.ACLTokensByGlobal(nil, true)
	if err != nil {
		panic("failed to iterate local tokens")
	}
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		token := raw.(*structs.ACLToken)
		local[token.AccessorID] = token.Hash
	}

	// Iterate over the remote tokens
	for _, rp := range remoteList {
		remote[rp.AccessorID] = struct{}{}

		// Check if the token is missing locally
		if localHash, ok := local[rp.AccessorID]; !ok {
			update = append(update, rp.AccessorID)

			// Check if the token is newer remotely and there is a hash mismatch.
		} else if rp.ModifyIndex > minIndex && !bytes.Equal(localHash, rp.Hash) {
			update = append(update, rp.AccessorID)
		}
	}

	// Check if the local token should be deleted
	for lp := range local {
		if _, ok := remote[lp]; !ok {
			delete = append(delete, lp)
		}
	}
	return
}

// getOrCreateAutopilotConfig is used to get the autopilot config, initializing it if necessary
func (s *Server) getOrCreateAutopilotConfig() *structs.AutopilotConfig {
	state := s.fsm.State()
	_, config, err := state.AutopilotConfig()
	if err != nil {
		s.logger.Printf("[ERR] autopilot: failed to get config: %v", err)
		return nil
	}
	if config != nil {
		return config
	}

	if !ServersMeetMinimumVersion(s.Members(), minAutopilotVersion) {
		s.logger.Printf("[WARN] autopilot: can't initialize until all servers are >= %s", minAutopilotVersion.String())
		return nil
	}

	config = s.config.AutopilotConfig
	req := structs.AutopilotSetConfigRequest{Config: *config}
	if _, _, err = s.raftApply(structs.AutopilotRequestType, req); err != nil {
		s.logger.Printf("[ERR] autopilot: failed to initialize config: %v", err)
		return nil
	}

	return config
}