package nomad

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-memdb"
	"github.com/hashicorp/go-version"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/serf/serf"
	"golang.org/x/time/rate"
)

const (
	// failedEvalUnblockInterval is the interval at which failed evaluations are
	// unblocked to re-enter the scheduler. A failed evaluation occurs under
	// high contention when the scheduler's plan does not make progress.
	failedEvalUnblockInterval = 1 * time.Minute

	// replicationRateLimit is used to rate limit how often data is replicated
	// between the authoritative region and the local region
	replicationRateLimit rate.Limit = 10.0

	// barrierWriteTimeout is used to give Raft a chance to process a
	// possible loss of leadership event if we are unable to get a barrier
	// while leader.
	barrierWriteTimeout = 2 * time.Minute
)

var minAutopilotVersion = version.Must(version.NewVersion("0.8.0"))

var minSchedulerConfigVersion = version.Must(version.NewVersion("0.9.0"))

var minClusterIDVersion = version.Must(version.NewVersion("0.10.4"))

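// minJobRegisterAtomicEvalVersion is the Nomad version at which job
// (de-)registration and the evaluation that processes it are persisted and
// processed atomically in the same Raft log entry. Until all servers have
// upgraded, the leader continues to emit the separate evaluation log entry,
// and servers honor evaluations found in either location, ignoring the
// duplicate when both are present.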
var minJobRegisterAtomicEvalVersion = version.Must(version.NewVersion("0.12.1"))

var minOneTimeAuthenticationTokenVersion = version.Must(version.NewVersion("1.1.0"))

// minACLRoleVersion is the Nomad version at which the ACL role table was
// introduced. It forms the minimum version all federated servers must meet
// before the feature can be used.
var minACLRoleVersion = version.Must(version.NewVersion("1.4.0"))

// minACLAuthMethodVersion is the Nomad version at which the ACL auth methods
// table was introduced. It forms the minimum version all federated servers must
// meet before the feature can be used.
//
// TODO: version constraint will be updated for every beta or rc until we reach
// 1.5, otherwise it's hard to test the functionality
var minACLAuthMethodVersion = version.Must(version.NewVersion("1.4.3-dev"))

// minACLBindingRuleVersion is the Nomad version at which the ACL binding rules
// table was introduced. It forms the minimum version all federated servers
// must meet before the feature can be used.
//
// TODO: version constraint will be updated for every beta or rc until we reach
// 1.5, otherwise it's hard to test the functionality
var minACLBindingRuleVersion = version.Must(version.NewVersion("1.4.3-dev"))

// minNomadServiceRegistrationVersion is the Nomad version at which the service
// registrations table was introduced. It forms the minimum version all local
// servers must meet before the feature can be used.
var minNomadServiceRegistrationVersion = version.Must(version.NewVersion("1.3.0"))

// monitorLeadership is used to monitor if we acquire or lose our role
// as the leader in the Raft cluster. There is some work the leader is
// expected to do, so we must react to changes
func (s *Server) monitorLeadership() {
	var weAreLeaderCh chan struct{}
	var leaderLoop sync.WaitGroup

	leaderCh := s.raft.LeaderCh()

	leaderStep := func(isLeader bool) {
		if isLeader {
			if weAreLeaderCh != nil {
				s.logger.Error("attempted to start the leader loop while running")
				return
			}

			weAreLeaderCh = make(chan struct{})
			leaderLoop.Add(1)
			go func(ch chan struct{}) {
				defer leaderLoop.Done()
				s.leaderLoop(ch)
			}(weAreLeaderCh)
			s.logger.Info("cluster leadership acquired")
			return
		}

		if weAreLeaderCh == nil {
			s.logger.Error("attempted to stop the leader loop while not running")
			return
		}

		s.logger.Debug("shutting down leader loop")
		close(weAreLeaderCh)
		leaderLoop.Wait()
		weAreLeaderCh = nil
		s.logger.Info("cluster leadership lost")
	}

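	// Raft delivers leadership transitions on leaderCh and blocks until they
	// are consumed, so the loop below must drain the channel promptly.
	// Tracking the previous state lets us detect leadership flapping
	// (lost-and-regained, or gained-and-lost before we reacted) and handle
	// it explicitly rather than deadlocking on a missed transition.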
	wasLeader := false
	for {
		select {
		case isLeader := <-leaderCh:
			if wasLeader != isLeader {
				wasLeader = isLeader
				// normal case where we went through a transition
				leaderStep(isLeader)
			} else if wasLeader && isLeader {
				// Server lost but then gained leadership immediately.
				// During this time, this server may have received
				// Raft transitions that haven't been applied to the FSM
				// yet.
				// Ensure that the FSM caught up and eval queues are refreshed
				s.logger.Warn("cluster leadership lost and gained leadership immediately. Could indicate network issues, memory paging, or high CPU load.")

				leaderStep(false)
				leaderStep(true)
			} else {
				// Server gained but lost leadership immediately
				// before it reacted; nothing to do, move on
				s.logger.Warn("cluster leadership gained and lost leadership immediately. Could indicate network issues, memory paging, or high CPU load.")
			}
		case <-s.shutdownCh:
			if weAreLeaderCh != nil {
				leaderStep(false)
			}
			return
		}
	}
}

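// leadershipTransfer attempts to hand Raft leadership over to another server,
// retrying a fixed number of times before giving up.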
func (s *Server) leadershipTransfer() error {
	retryCount := 3
	for i := 0; i < retryCount; i++ {
		err := s.raft.LeadershipTransfer().Error()
		if err == nil {
			s.logger.Info("successfully transferred leadership")
			return nil
		}

		// Don't retry if the Raft version doesn't support leadership transfer
		// since this will never succeed.
		if err == raft.ErrUnsupportedProtocol {
			return fmt.Errorf("leadership transfer not supported with Raft version lower than 3")
		}

		s.logger.Error("failed to transfer leadership attempt, will retry",
			"attempt", i,
			"retry_limit", retryCount,
			"error", err,
		)
	}
	return fmt.Errorf("failed to transfer leadership in %d attempts", retryCount)
}

// leaderLoop runs as long as we are the leader to run various
// maintenance activities
func (s *Server) leaderLoop(stopCh chan struct{}) {
	var reconcileCh chan serf.Member
	establishedLeader := false

RECONCILE:
	// Setup a reconciliation timer
	reconcileCh = nil
	interval := time.After(s.config.ReconcileInterval)

	// Apply a raft barrier to ensure our FSM is caught up
	start := time.Now()
	barrier := s.raft.Barrier(barrierWriteTimeout)
	if err := barrier.Error(); err != nil {
		s.logger.Error("failed to wait for barrier", "error", err)
		goto WAIT
	}
	metrics.MeasureSince([]string{"nomad", "leader", "barrier"}, start)

	// Check if we need to handle initial leadership actions
	if !establishedLeader {
		if err := s.establishLeadership(stopCh); err != nil {
			s.logger.Error("failed to establish leadership", "error", err)

			// Immediately revoke leadership since we didn't successfully
			// establish leadership.
			if err := s.revokeLeadership(); err != nil {
				s.logger.Error("failed to revoke leadership", "error", err)
			}

			// Attempt to transfer leadership. If successful, leave the
			// leaderLoop since this node is no longer the leader. Otherwise
			// try to establish leadership again after 5 seconds.
			if err := s.leadershipTransfer(); err != nil {
				s.logger.Error("failed to transfer leadership", "error", err)
				interval = time.After(5 * time.Second)
				goto WAIT
			}
			return
		}

		establishedLeader = true
		defer func() {
			if err := s.revokeLeadership(); err != nil {
				s.logger.Error("failed to revoke leadership", "error", err)
			}
		}()
	}

	// Reconcile any missing data
	if err := s.reconcile(); err != nil {
		s.logger.Error("failed to reconcile", "error", err)
		goto WAIT
	}

	// Initial reconcile worked, now we can process the channel
	// updates
	reconcileCh = s.reconcileCh

	// Poll the stop channel to give it priority so we don't waste time
	// trying to perform the other operations if we have been asked to shut
	// down.
	select {
	case <-stopCh:
		return
	default:
	}

WAIT:
	// Wait until leadership is lost or periodically reconcile as long as we
	// are the leader, or when Serf events arrive.
	for {
		select {
		case <-stopCh:
			// Lost leadership.
			return
		case <-s.shutdownCh:
			return
		case <-interval:
			goto RECONCILE
		case member := <-reconcileCh:
			s.reconcileMember(member)
		case errCh := <-s.reassertLeaderCh:
			// Recompute leader state, by asserting leadership and
			// repopulating leader states.

			// Check first if we are indeed the leader. We can get into this
			// state when the initial establishLeadership has failed.
			// Afterwards we will be waiting for the interval to trigger a
			// reconciliation and can potentially end up here. There is no
			// point to reassert because this agent was never leader in the
			// first place.
			if !establishedLeader {
				errCh <- fmt.Errorf("leadership has not been established")
				continue
			}

			// refresh leadership state
			s.revokeLeadership()
			err := s.establishLeadership(stopCh)
			errCh <- err

			// In case establishLeadership fails, try to transfer leadership.
			// At this point Raft thinks we are the leader, but Nomad did not
			// complete the required steps to act as the leader.
			if err != nil {
				if err := s.leadershipTransfer(); err != nil {
					// establishedLeader was true before, but it no longer is
					// since we revoked leadership and leadershipTransfer also
					// failed.
					// Stay in the leaderLoop with establishedLeader set to
					// false so we try to establish leadership again in the
					// next loop.
					establishedLeader = false
					interval = time.After(5 * time.Second)
					goto WAIT
				}

				// leadershipTransfer was successful and it is
				// time to leave the leaderLoop.
				return
			}
		}
	}
}

// establishLeadership is invoked once we become leader and are able
// to invoke an initial barrier. The barrier is used to ensure any
// previously inflight transactions have been committed and that our
// state is up-to-date.
func (s *Server) establishLeadership(stopCh chan struct{}) error {
	defer metrics.MeasureSince([]string{"nomad", "leader", "establish_leadership"}, time.Now())

	// Generate a leader ACL token. This will allow the leader to issue work
	// that requires a valid ACL token.
	s.setLeaderAcl(uuid.Generate())

	// Disable workers to free half the cores for use in the plan queue and
	// evaluation broker
	s.handlePausableWorkers(true)

	// Initialize and start the autopilot routine
	s.getOrCreateAutopilotConfig()
	s.autopilot.Start(s.shutdownCtx)

	// Initialize scheduler configuration.
	schedulerConfig := s.getOrCreateSchedulerConfig()

	// Initialize the ClusterID
	_, _ = s.ClusterID()
	// todo: use cluster ID for stuff, later!

	// Enable the plan queue, since we are now the leader
	s.planQueue.SetEnabled(true)

	// Start the plan evaluator
	go s.planApply()

	// Start the eval broker and blocked eval broker if these are not paused by
	// the operator.
	restoreEvals := s.handleEvalBrokerStateChange(schedulerConfig)

	// Enable the deployment watcher, since we are now the leader
	s.deploymentWatcher.SetEnabled(true, s.State())

	// Enable the NodeDrainer
	s.nodeDrainer.SetEnabled(true, s.State())

	// Enable the volume watcher, since we are now the leader
	s.volumeWatcher.SetEnabled(true, s.State(), s.getLeaderAcl())

	// Restore the eval broker state and blocked eval state. If these are
	// currently paused, we do not need to do this.
	if restoreEvals {
		if err := s.restoreEvals(); err != nil {
			return err
		}
	}

	// Activate the vault client
	s.vault.SetActive(true)

	// Enable the periodic dispatcher, since we are now the leader.
	s.periodicDispatcher.SetEnabled(true)

	// Activate RPC now that the local FSM has caught up with Raft (as
	// evidenced by the successful Barrier call) and all leader-related
	// components (e.g. broker queue) are enabled. Auxiliary processes
	// (e.g. background, bookkeeping, and cleanup tasks) can start afterwards.
	s.setConsistentReadReady()

	// Further clean ups and follow up that don't block RPC consistency

	// Create the first root key if it doesn't already exist
	go s.initializeKeyring(stopCh)

	// Restore the periodic dispatcher state
	if err := s.restorePeriodicDispatcher(); err != nil {
		return err
	}

	// Schedule periodic jobs which include expired local ACL token garbage
	// collection.
	go s.schedulePeriodic(stopCh)

	// Reap any failed evaluations
	go s.reapFailedEvaluations(stopCh)

	// Reap any duplicate blocked evaluations
	go s.reapDupBlockedEvaluations(stopCh)

	// Reap any cancelable evaluations
	s.reapCancelableEvalsCh = s.reapCancelableEvaluations(stopCh)

	// Periodically unblock failed allocations
	go s.periodicUnblockFailedEvals(stopCh)

	// Periodically publish job summary metrics
	go s.publishJobSummaryMetrics(stopCh)

	// Periodically publish job status metrics
	go s.publishJobStatusMetrics(stopCh)

	// Setup the heartbeat timers. This is done both when starting up and when
	// a leader failover happens. Since the timers are maintained by the leader
	// node, effectively this means all the timers are renewed at the time of failover.
	// The TTL contract is that the session will not be expired before the TTL,
	// so expiring it later is allowable.
	//
	// This MUST be done after the initial barrier to ensure the latest Nodes
	// are available to be initialized. Otherwise initialization may use stale
	// data.
	if err := s.initializeHeartbeatTimers(); err != nil {
		s.logger.Error("heartbeat timer setup failed", "error", err)
		return err
	}

	// If ACLs are enabled, the leader needs to start a number of long-lived
	// routines. Exactly which routines depends on whether this leader is
	// running within the authoritative region or not.
	if s.config.ACLEnabled {

		// The authoritative region is responsible for garbage collecting
		// expired global tokens. Otherwise, non-authoritative regions need to
		// replicate policies, tokens, and namespaces.
		switch s.config.AuthoritativeRegion {
		case s.config.Region:
			go s.schedulePeriodicAuthoritative(stopCh)
		default:
			go s.replicateACLPolicies(stopCh)
			go s.replicateACLTokens(stopCh)
			go s.replicateACLRoles(stopCh)
			go s.replicateACLAuthMethods(stopCh)
			go s.replicateACLBindingRules(stopCh)
			go s.replicateNamespaces(stopCh)
		}
	}

	// Setup any enterprise systems required.
	if err := s.establishEnterpriseLeadership(stopCh); err != nil {
		return err
	}

	// Cleanup orphaned Vault token accessors
	if err := s.revokeVaultAccessorsOnRestore(); err != nil {
		return err
	}

	// Cleanup orphaned Service Identity token accessors
	if err := s.revokeSITokenAccessorsOnRestore(); err != nil {
		return err
	}

	return nil
}

// replicateNamespaces is used to replicate namespaces from the authoritative
// region to this region.
func (s *Server) replicateNamespaces(stopCh chan struct{}) {
	req := structs.NamespaceListRequest{
		QueryOptions: structs.QueryOptions{
			Region:     s.config.AuthoritativeRegion,
			AllowStale: true,
		},
	}
	limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
	s.logger.Debug("starting namespace replication from authoritative region", "region", req.Region)

START:
	for {
		select {
		case <-stopCh:
			return
		default:
		}

		// Rate limit how often we attempt replication
		limiter.Wait(context.Background())

		// Fetch the list of namespaces
		var resp structs.NamespaceListResponse
		req.AuthToken = s.ReplicationToken()
		err := s.forwardRegion(s.config.AuthoritativeRegion, "Namespace.ListNamespaces", &req, &resp)
		if err != nil {
			s.logger.Error("failed to fetch namespaces from authoritative region", "error", err)
			goto ERR_WAIT
		}

		// Perform a two-way diff
		delete, update := diffNamespaces(s.State(), req.MinQueryIndex, resp.Namespaces)

		// Delete namespaces that should not exist
		if len(delete) > 0 {
			args := &structs.NamespaceDeleteRequest{
				Namespaces: delete,
			}
			_, _, err := s.raftApply(structs.NamespaceDeleteRequestType, args)
			if err != nil {
				s.logger.Error("failed to delete namespaces", "error", err)
				goto ERR_WAIT
			}
		}

		// Fetch any outdated namespaces
		var fetched []*structs.Namespace
		if len(update) > 0 {
			req := structs.NamespaceSetRequest{
				Namespaces: update,
				QueryOptions: structs.QueryOptions{
					Region:        s.config.AuthoritativeRegion,
					AuthToken:     s.ReplicationToken(),
					AllowStale:    true,
					MinQueryIndex: resp.Index - 1,
				},
			}
			var reply structs.NamespaceSetResponse
			if err := s.forwardRegion(s.config.AuthoritativeRegion, "Namespace.GetNamespaces", &req, &reply); err != nil {
				s.logger.Error("failed to fetch namespaces from authoritative region", "error", err)
				goto ERR_WAIT
			}
			for _, namespace := range reply.Namespaces {
				fetched = append(fetched, namespace)
			}
		}

		// Update local namespaces
		if len(fetched) > 0 {
			args := &structs.NamespaceUpsertRequest{
				Namespaces: fetched,
			}
			_, _, err := s.raftApply(structs.NamespaceUpsertRequestType, args)
			if err != nil {
				s.logger.Error("failed to update namespaces", "error", err)
				goto ERR_WAIT
			}
		}

		// Update the minimum query index, blocks until there is a change.
		req.MinQueryIndex = resp.Index
	}

ERR_WAIT:
	select {
	case <-time.After(s.config.ReplicationBackoff):
		goto START
	case <-stopCh:
		return
	}
}

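// handlePausableWorkers pauses the pausable scheduling workers when this
// server is the leader and resumes them otherwise, freeing capacity for the
// plan queue and evaluation broker.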
func (s *Server) handlePausableWorkers(isLeader bool) {
	for _, w := range s.pausableWorkers() {
		if isLeader {
			w.Pause()
		} else {
			w.Resume()
		}
	}
}

// diffNamespaces is used to perform a two-way diff between the local namespaces
// and the remote namespaces to determine which namespaces need to be deleted or
// updated.
func diffNamespaces(state *state.StateStore, minIndex uint64, remoteList []*structs.Namespace) (delete []string, update []string) {
	// Construct a set of the local and remote namespaces
	local := make(map[string][]byte)
	remote := make(map[string]struct{})

	// Add all the local namespaces
	iter, err := state.Namespaces(nil)
	if err != nil {
		panic("failed to iterate local namespaces")
	}
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		namespace := raw.(*structs.Namespace)
		local[namespace.Name] = namespace.Hash
	}

	// Iterate over the remote namespaces
	for _, rns := range remoteList {
		remote[rns.Name] = struct{}{}

		// Check if the namespace is missing locally
		if localHash, ok := local[rns.Name]; !ok {
			update = append(update, rns.Name)

			// Check if the namespace is newer remotely and there is a hash
			// mis-match.
		} else if rns.ModifyIndex > minIndex && !bytes.Equal(localHash, rns.Hash) {
			update = append(update, rns.Name)
		}
	}

	// Check if namespaces should be deleted
	for lns := range local {
		if _, ok := remote[lns]; !ok {
			delete = append(delete, lns)
		}
	}
	return
}

// restoreEvals is used to restore pending evaluations into the eval broker and
// blocked evaluations into the blocked eval tracker. The broker and blocked
// eval tracker are maintained only by the leader, so they must be restored
// anytime a leadership transition takes place.
func (s *Server) restoreEvals() error {
	// Get an iterator over every evaluation
	ws := memdb.NewWatchSet()
	iter, err := s.fsm.State().Evals(ws, false)
	if err != nil {
		return fmt.Errorf("failed to get evaluations: %v", err)
	}

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		eval := raw.(*structs.Evaluation)

		if eval.ShouldEnqueue() {
			s.evalBroker.Enqueue(eval)
		} else if eval.ShouldBlock() {
			s.blockedEvals.Block(eval)
		}
	}
	return nil
}

// revokeVaultAccessorsOnRestore is used to restore Vault accessors that should be
// revoked.
func (s *Server) revokeVaultAccessorsOnRestore() error {
	// An accessor should be revoked if its allocation or node is terminal
	ws := memdb.NewWatchSet()
	state := s.fsm.State()
	iter, err := state.VaultAccessors(ws)
	if err != nil {
		return fmt.Errorf("failed to get vault accessors: %v", err)
	}

	var revoke []*structs.VaultAccessor
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		va := raw.(*structs.VaultAccessor)

		// Check the allocation
		alloc, err := state.AllocByID(ws, va.AllocID)
		if err != nil {
			return fmt.Errorf("failed to lookup allocation %q: %v", va.AllocID, err)
		}
		if alloc == nil || alloc.Terminated() {
			// No longer running and should be revoked
			revoke = append(revoke, va)
			continue
		}

		// Check the node
		node, err := state.NodeByID(ws, va.NodeID)
		if err != nil {
			return fmt.Errorf("failed to lookup node %q: %v", va.NodeID, err)
		}
		if node == nil || node.TerminalStatus() {
			// Node is terminal so any accessor from it should be revoked
			revoke = append(revoke, va)
			continue
		}
	}

	if len(revoke) != 0 {
		s.logger.Info("revoking vault accessors after becoming leader", "accessors", len(revoke))

		if err := s.vault.MarkForRevocation(revoke); err != nil {
			return fmt.Errorf("failed to revoke tokens: %v", err)
		}
	}

	return nil
}

// revokeSITokenAccessorsOnRestore is used to revoke Service Identity token
// accessors on behalf of allocs that are now gone / terminal.
func (s *Server) revokeSITokenAccessorsOnRestore() error {
	ws := memdb.NewWatchSet()
	fsmState := s.fsm.State()
	iter, err := fsmState.SITokenAccessors(ws)
	if err != nil {
		return fmt.Errorf("failed to get SI token accessors: %w", err)
	}

	var toRevoke []*structs.SITokenAccessor
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		accessor := raw.(*structs.SITokenAccessor)

		// Check the allocation
		alloc, err := fsmState.AllocByID(ws, accessor.AllocID)
		if err != nil {
			return fmt.Errorf("failed to lookup alloc %q: %w", accessor.AllocID, err)
		}
		if alloc == nil || alloc.Terminated() {
			// no longer running and associated accessors should be revoked
			toRevoke = append(toRevoke, accessor)
			continue
		}

		// Check the node
		node, err := fsmState.NodeByID(ws, accessor.NodeID)
		if err != nil {
			return fmt.Errorf("failed to lookup node %q: %w", accessor.NodeID, err)
		}
		if node == nil || node.TerminalStatus() {
			// node is terminal and associated accessors should be revoked
			toRevoke = append(toRevoke, accessor)
			continue
		}
	}

	if len(toRevoke) > 0 {
		s.logger.Info("revoking consul accessors after becoming leader", "accessors", len(toRevoke))
		s.consulACLs.MarkForRevocation(toRevoke)
	}

	return nil
}

// restorePeriodicDispatcher is used to restore all periodic jobs into the
// periodic dispatcher. It also determines if a periodic job should have been
// created during the leadership transition and force runs them. The periodic
// dispatcher is maintained only by the leader, so it must be restored anytime a
// leadership transition takes place.
func (s *Server) restorePeriodicDispatcher() error {
	logger := s.logger.Named("periodic")
	ws := memdb.NewWatchSet()
	iter, err := s.fsm.State().JobsByPeriodic(ws, true)
	if err != nil {
		return fmt.Errorf("failed to get periodic jobs: %v", err)
	}

	now := time.Now()
	for i := iter.Next(); i != nil; i = iter.Next() {
		job := i.(*structs.Job)

		// We skip adding parameterized jobs because they themselves aren't
		// tracked, only the dispatched children are.
		if job.IsParameterized() {
			continue
		}

		if err := s.periodicDispatcher.Add(job); err != nil {
			logger.Error("failed to add job to periodic dispatcher", "error", err)
			continue
		}

		// We do not need to force run the job since it isn't active.
		if !job.IsPeriodicActive() {
			continue
		}

		// If the periodic job has never been launched before, launch will hold
		// the time the periodic job was added. Otherwise it has the last launch
		// time of the periodic job.
		launch, err := s.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
		if err != nil {
			return fmt.Errorf("failed to get periodic launch time: %v", err)
		}
		if launch == nil {
			return fmt.Errorf("no recorded periodic launch time for job %q in namespace %q",
				job.ID, job.Namespace)
		}

		// nextLaunch is the next launch that should occur.
		nextLaunch, err := job.Periodic.Next(launch.Launch.In(job.Periodic.GetLocation()))
		if err != nil {
			logger.Error("failed to determine next periodic launch for job", "job", job.NamespacedID(), "error", err)
			continue
		}

		// We skip force launching the job if there should be no next launch
		// (the zero case) or if the next launch time is in the future. If it is
		// in the future, it will be handled by the periodic dispatcher.
		if nextLaunch.IsZero() || !nextLaunch.Before(now) {
			continue
		}

		if _, err := s.periodicDispatcher.ForceRun(job.Namespace, job.ID); err != nil {
			logger.Error("force run of periodic job failed", "job", job.NamespacedID(), "error", err)
			return fmt.Errorf("force run of periodic job %q failed: %v", job.NamespacedID(), err)
		}
		logger.Debug("periodic job force ran during leadership establishment", "job", job.NamespacedID())
	}

	return nil
}

// schedulePeriodic is used to do periodic job dispatch while we are leader
func (s *Server) schedulePeriodic(stopCh chan struct{}) {
	evalGC := time.NewTicker(s.config.EvalGCInterval)
	defer evalGC.Stop()
	nodeGC := time.NewTicker(s.config.NodeGCInterval)
	defer nodeGC.Stop()
	jobGC := time.NewTicker(s.config.JobGCInterval)
	defer jobGC.Stop()
	deploymentGC := time.NewTicker(s.config.DeploymentGCInterval)
	defer deploymentGC.Stop()
	csiPluginGC := time.NewTicker(s.config.CSIPluginGCInterval)
	defer csiPluginGC.Stop()
	csiVolumeClaimGC := time.NewTicker(s.config.CSIVolumeClaimGCInterval)
	defer csiVolumeClaimGC.Stop()
	oneTimeTokenGC := time.NewTicker(s.config.OneTimeTokenGCInterval)
	defer oneTimeTokenGC.Stop()
	rootKeyGC := time.NewTicker(s.config.RootKeyGCInterval)
	defer rootKeyGC.Stop()
	variablesRekey := time.NewTicker(s.config.VariablesRekeyInterval)
	defer variablesRekey.Stop()

	// Set up the expired ACL local token garbage collection timer.
	localTokenExpiredGC, localTokenExpiredGCStop := helper.NewSafeTimer(s.config.ACLTokenExpirationGCInterval)
	defer localTokenExpiredGCStop()

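	// Each case below enqueues a core-job evaluation at the state store's
	// latest index when its ticker fires; the core scheduler then performs
	// the corresponding garbage collection or rotation work.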
	for {
		select {
		case <-evalGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobEvalGC, index))
			}
		case <-nodeGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobNodeGC, index))
			}
		case <-jobGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobJobGC, index))
			}
		case <-deploymentGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobDeploymentGC, index))
			}
		case <-csiPluginGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobCSIPluginGC, index))
			}
		case <-csiVolumeClaimGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobCSIVolumeClaimGC, index))
			}
		case <-oneTimeTokenGC.C:
			if !ServersMeetMinimumVersion(s.Members(), s.Region(), minOneTimeAuthenticationTokenVersion, false) {
				continue
			}

			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobOneTimeTokenGC, index))
			}
		case <-localTokenExpiredGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobLocalTokenExpiredGC, index))
			}
			localTokenExpiredGC.Reset(s.config.ACLTokenExpirationGCInterval)
		case <-rootKeyGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobRootKeyRotateOrGC, index))
			}
		case <-variablesRekey.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobVariablesRekey, index))
			}
		case <-stopCh:
			return
		}
	}
}

// schedulePeriodicAuthoritative is a long-lived routine intended for use on
// the leader within the authoritative region only. It periodically queues work
// onto the _core scheduler for ACL based activities such as removing expired
// global ACL tokens.
func (s *Server) schedulePeriodicAuthoritative(stopCh chan struct{}) {

	// Set up the expired ACL global token garbage collection timer.
	globalTokenExpiredGC, globalTokenExpiredGCStop := helper.NewSafeTimer(s.config.ACLTokenExpirationGCInterval)
	defer globalTokenExpiredGCStop()

	for {
		select {
		case <-globalTokenExpiredGC.C:
			if index, ok := s.getLatestIndex(); ok {
				s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobGlobalTokenExpiredGC, index))
			}
			globalTokenExpiredGC.Reset(s.config.ACLTokenExpirationGCInterval)
		case <-stopCh:
			return
		}
	}
}

// getLatestIndex is a helper function which returns the latest index from the
// state store. The boolean return indicates whether the call has been
// successful or not.
func (s *Server) getLatestIndex() (uint64, bool) {
	snapshotIndex, err := s.fsm.State().LatestIndex()
	if err != nil {
		s.logger.Error("failed to determine state store's index", "error", err)
		return 0, false
	}
	return snapshotIndex, true
}

// coreJobEval returns an evaluation for a core job
func (s *Server) coreJobEval(job string, modifyIndex uint64) *structs.Evaluation {
	return &structs.Evaluation{
		ID:          uuid.Generate(),
		Namespace:   "-",
		Priority:    structs.CoreJobPriority,
		Type:        structs.JobTypeCore,
		TriggeredBy: structs.EvalTriggerScheduled,
		JobID:       job,
		LeaderACL:   s.getLeaderAcl(),
		Status:      structs.EvalStatusPending,
		ModifyIndex: modifyIndex,
	}
}

// reapFailedEvaluations is used to reap evaluations that
// have reached their delivery limit and should be failed
func (s *Server) reapFailedEvaluations(stopCh chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
			// Scan for a failed evaluation
			eval, token, err := s.evalBroker.Dequeue([]string{failedQueue}, time.Second)
			if err != nil {
				return
			}
			if eval == nil {
				continue
			}

			// Update the status to failed
			updateEval := eval.Copy()
			updateEval.Status = structs.EvalStatusFailed
			updateEval.StatusDescription = fmt.Sprintf("evaluation reached delivery limit (%d)", s.config.EvalDeliveryLimit)
			s.logger.Warn("eval reached delivery limit, marking as failed",
				"eval", hclog.Fmt("%#v", updateEval))

			// Core job evals that fail or span leader elections will never
			// succeed because the follow-up doesn't have the leader ACL. We
			// rely on the leader to schedule new core jobs periodically
			// instead.
			if eval.Type != structs.JobTypeCore {

				// Create a follow-up evaluation that will be used to retry the
				// scheduling for the job after the cluster is hopefully more stable
				// due to the fairly large backoff.
				followupEvalWait := s.config.EvalFailedFollowupBaselineDelay +
					time.Duration(rand.Int63n(int64(s.config.EvalFailedFollowupDelayRange)))

				followupEval := eval.CreateFailedFollowUpEval(followupEvalWait)
				updateEval.NextEval = followupEval.ID
				updateEval.UpdateModifyTime()

				// Update via Raft
				req := structs.EvalUpdateRequest{
					Evals: []*structs.Evaluation{updateEval, followupEval},
				}
				if _, _, err := s.raftApply(structs.EvalUpdateRequestType, &req); err != nil {
					s.logger.Error("failed to update failed eval and create a follow-up",
						"eval", hclog.Fmt("%#v", updateEval), "error", err)
					continue
				}
			}

			// Ack completion
			s.evalBroker.Ack(eval.ID, token)
		}
	}
}

// reapDupBlockedEvaluations is used to reap duplicate blocked evaluations that
// should be cancelled.
func (s *Server) reapDupBlockedEvaluations(stopCh chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
			// Scan for duplicate blocked evals.
			dups := s.blockedEvals.GetDuplicates(time.Second)
			if dups == nil {
				continue
			}

			cancel := make([]*structs.Evaluation, len(dups))
			for i, dup := range dups {
				// Update the status to cancelled
				newEval := dup.Copy()
				newEval.Status = structs.EvalStatusCancelled
				newEval.StatusDescription = fmt.Sprintf("existing blocked evaluation exists for job %q", newEval.JobID)
				newEval.UpdateModifyTime()
				cancel[i] = newEval
			}

			// Update via Raft
			req := structs.EvalUpdateRequest{
				Evals: cancel,
			}
			if _, _, err := s.raftApply(structs.EvalUpdateRequestType, &req); err != nil {
				s.logger.Error("failed to update duplicate evals", "evals", hclog.Fmt("%#v", cancel), "error", err)
				continue
			}
		}
	}
}

2022-11-16 21:10:11 +00:00
|
|
|
// reapCancelableEvaluations is used to reap evaluations that were marked
|
|
|
|
// cancelable by the eval broker and should be canceled. These get swept up
|
|
|
|
// whenever an eval Acks, but this ensures that we don't have a straggling batch
|
|
|
|
// when the cluster doesn't have any more work to do. Returns a wake-up channel
|
|
|
|
// that can be used to trigger a new reap without waiting for the timer
|
|
|
|
func (s *Server) reapCancelableEvaluations(stopCh chan struct{}) chan struct{} {
|
|
|
|
|
|
|
|
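// The wake channel is buffered so that a single wake-up can be queued
// without blocking the caller that requests an early reap.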
wakeCh := make(chan struct{}, 1)
|
|
|
|
go func() {
|
|
|
|
|
|
|
|
timer, cancel := helper.NewSafeTimer(s.config.EvalReapCancelableInterval)
|
|
|
|
defer cancel()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
case <-wakeCh:
|
|
|
|
cancelCancelableEvals(s)
|
|
|
|
case <-timer.C:
|
|
|
|
cancelCancelableEvals(s)
|
|
|
|
timer.Reset(s.config.EvalReapCancelableInterval)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return wakeCh
|
|
|
|
}
|
|
|
|
|
|
|
|
// cancelCancelableEvals pulls a batch of cancelable evaluations from the eval
|
|
|
|
// broker and updates their status to canceled.
|
|
|
|
func cancelCancelableEvals(srv *Server) error {
|
|
|
|
|
|
|
|
const cancelDesc = "canceled after more recent eval was processed"
|
|
|
|
|
|
|
|
// We *can* send larger raft logs but rough benchmarks show that a smaller
|
|
|
|
// page size strikes a balance between throughput and time we block the FSM
|
|
|
|
// apply for other operations
|
|
|
|
cancelable := srv.evalBroker.Cancelable(structs.MaxUUIDsPerWriteRequest / 10)
|
|
|
|
if len(cancelable) > 0 {
|
|
|
|
for i, eval := range cancelable {
|
|
|
|
eval = eval.Copy()
|
|
|
|
eval.Status = structs.EvalStatusCancelled
|
|
|
|
eval.StatusDescription = cancelDesc
|
|
|
|
eval.UpdateModifyTime()
|
|
|
|
cancelable[i] = eval
|
|
|
|
}
|
|
|
|
|
|
|
|
update := &structs.EvalUpdateRequest{
|
|
|
|
Evals: cancelable,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: srv.Region()},
|
|
|
|
}
|
|
|
|
_, _, err := srv.raftApply(structs.EvalUpdateRequestType, update)
|
|
|
|
if err != nil {
|
|
|
|
srv.logger.Warn("eval cancel failed", "error", err, "method", "ack")
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-05-23 23:27:26 +00:00
|
|
|
// periodicUnblockFailedEvals periodically unblocks failed, blocked evaluations.
|
|
|
|
func (s *Server) periodicUnblockFailedEvals(stopCh chan struct{}) {
|
2016-07-06 00:08:58 +00:00
|
|
|
ticker := time.NewTicker(failedEvalUnblockInterval)
|
2016-05-25 17:28:25 +00:00
|
|
|
defer ticker.Stop()
|
2016-05-23 23:27:26 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
// Unblock the failed evaluations
|
|
|
|
s.blockedEvals.UnblockFailed()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-30 19:19:11 +00:00
|
|
|
// publishJobSummaryMetrics publishes the job summaries as metrics
|
|
|
|
func (s *Server) publishJobSummaryMetrics(stopCh chan struct{}) {
|
|
|
|
timer := time.NewTimer(0)
|
|
|
|
defer timer.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
case <-timer.C:
|
2017-11-01 20:14:44 +00:00
|
|
|
timer.Reset(s.config.StatsCollectionInterval)
|
2017-10-30 19:19:11 +00:00
|
|
|
state, err := s.State().Snapshot()
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to get state", "error", err)
|
2017-10-30 19:19:11 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
iter, err := state.JobSummaries(ws)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to get job summaries", "error", err)
|
2017-10-30 19:19:11 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
summary := raw.(*structs.JobSummary)
|
2018-11-14 21:04:03 +00:00
|
|
|
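// Optionally skip summaries for dispatched (parameterized child) jobs,
// which can otherwise produce a very large number of metric series.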
if s.config.DisableDispatchedJobSummaryMetrics {
|
|
|
|
job, err := state.JobByID(ws, summary.Namespace, summary.JobID)
|
|
|
|
if err != nil {
|
2018-11-30 03:27:39 +00:00
|
|
|
s.logger.Error("error getting job for summary", "error", err)
|
2018-11-14 21:04:03 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
if job.Dispatched {
|
|
|
|
continue
|
|
|
|
}
|
2017-10-30 19:19:11 +00:00
|
|
|
}
|
2018-11-14 16:13:52 +00:00
|
|
|
s.iterateJobSummaryMetrics(summary)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
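// iterateJobSummaryMetrics emits per-task-group gauges (queued, complete,
// failed, running, starting, lost, unknown) for a single job summary, adding
// parent and child ID labels for dispatched and periodic jobs.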
func (s *Server) iterateJobSummaryMetrics(summary *structs.JobSummary) {
|
|
|
|
for name, tgSummary := range summary.Summary {
|
2020-10-13 19:56:54 +00:00
|
|
|
labels := []metrics.Label{
|
|
|
|
{
|
|
|
|
Name: "job",
|
|
|
|
Value: summary.JobID,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "task_group",
|
|
|
|
Value: name,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "namespace",
|
|
|
|
Value: summary.Namespace,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.Contains(summary.JobID, "/dispatch-") {
|
|
|
|
jobInfo := strings.Split(summary.JobID, "/dispatch-")
|
|
|
|
labels = append(labels, metrics.Label{
|
|
|
|
Name: "parent_id",
|
|
|
|
Value: jobInfo[0],
|
|
|
|
}, metrics.Label{
|
|
|
|
Name: "dispatch_id",
|
|
|
|
Value: jobInfo[1],
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.Contains(summary.JobID, "/periodic-") {
|
|
|
|
jobInfo := strings.Split(summary.JobID, "/periodic-")
|
|
|
|
labels = append(labels, metrics.Label{
|
|
|
|
Name: "parent_id",
|
|
|
|
Value: jobInfo[0],
|
|
|
|
}, metrics.Label{
|
|
|
|
Name: "periodic_id",
|
|
|
|
Value: jobInfo[1],
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "queued"},
|
|
|
|
float32(tgSummary.Queued), labels)
|
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "complete"},
|
|
|
|
float32(tgSummary.Complete), labels)
|
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "failed"},
|
|
|
|
float32(tgSummary.Failed), labels)
|
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "running"},
|
|
|
|
float32(tgSummary.Running), labels)
|
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "starting"},
|
|
|
|
float32(tgSummary.Starting), labels)
|
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "lost"},
|
|
|
|
float32(tgSummary.Lost), labels)
|
2022-03-04 16:04:21 +00:00
|
|
|
metrics.SetGaugeWithLabels([]string{"nomad", "job_summary", "unknown"},
|
|
|
|
float32(tgSummary.Unknown), labels)
|
2017-10-30 19:19:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-24 13:17:33 +00:00
|
|
|
// publishJobStatusMetrics publishes the job statuses as metrics
|
|
|
|
func (s *Server) publishJobStatusMetrics(stopCh chan struct{}) {
|
|
|
|
timer := time.NewTimer(0)
|
|
|
|
defer timer.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
case <-timer.C:
|
|
|
|
timer.Reset(s.config.StatsCollectionInterval)
|
|
|
|
state, err := s.State().Snapshot()
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to get state", "error", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
iter, err := state.Jobs(ws)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to get job statuses", "error", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
s.iterateJobStatusMetrics(&iter)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
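// iterateJobStatusMetrics tallies the jobs returned by the iterator by their
// status (pending, running, dead) and publishes the totals as gauges.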
func (s *Server) iterateJobStatusMetrics(jobs *memdb.ResultIterator) {
|
|
|
|
var pending int64 // Sum of all jobs in 'pending' state
|
|
|
|
var running int64 // Sum of all jobs in 'running' state
|
|
|
|
var dead int64 // Sum of all jobs in 'dead' state
|
|
|
|
|
|
|
|
for {
|
|
|
|
raw := (*jobs).Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
job := raw.(*structs.Job)
|
|
|
|
|
|
|
|
switch job.Status {
|
|
|
|
case structs.JobStatusPending:
|
|
|
|
pending++
|
|
|
|
case structs.JobStatusRunning:
|
|
|
|
running++
|
|
|
|
case structs.JobStatusDead:
|
|
|
|
dead++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
metrics.SetGauge([]string{"nomad", "job_status", "pending"}, float32(pending))
|
|
|
|
metrics.SetGauge([]string{"nomad", "job_status", "running"}, float32(running))
|
|
|
|
metrics.SetGauge([]string{"nomad", "job_status", "dead"}, float32(dead))
|
|
|
|
}
|
|
|
|
|
2015-07-24 04:58:38 +00:00
|
|
|
// revokeLeadership is invoked once we step down as leader.
|
|
|
|
// This is used to clean up any state that may be specific to a leader.
|
|
|
|
func (s *Server) revokeLeadership() error {
|
2018-02-20 18:23:11 +00:00
|
|
|
defer metrics.MeasureSince([]string{"nomad", "leader", "revoke_leadership"}, time.Now())
|
2018-02-20 18:22:15 +00:00
|
|
|
|
2019-07-02 07:58:02 +00:00
|
|
|
s.resetConsistentReadReady()
|
|
|
|
|
2017-10-23 22:11:13 +00:00
|
|
|
// Clear the leader token since we are no longer the leader.
|
|
|
|
s.setLeaderAcl("")
|
|
|
|
|
2017-12-18 21:16:23 +00:00
|
|
|
// Disable autopilot
|
|
|
|
s.autopilot.Stop()
|
|
|
|
|
2015-07-27 22:11:42 +00:00
|
|
|
// Disable the plan queue, since we are no longer leader
|
|
|
|
s.planQueue.SetEnabled(false)
|
|
|
|
|
2022-07-06 14:13:48 +00:00
|
|
|
// Disable the eval broker and blocked evals. We do not need to check the
|
|
|
|
// scheduler configuration paused eval broker value, as the brokers should
|
|
|
|
// always be paused on the non-leader.
|
|
|
|
s.brokerLock.Lock()
|
2015-07-24 04:58:38 +00:00
|
|
|
s.evalBroker.SetEnabled(false)
|
2016-01-31 00:21:37 +00:00
|
|
|
s.blockedEvals.SetEnabled(false)
|
2022-07-06 14:13:48 +00:00
|
|
|
s.brokerLock.Unlock()
|
2016-01-31 00:21:37 +00:00
|
|
|
|
2015-12-18 20:26:28 +00:00
|
|
|
// Disable the periodic dispatcher, since it is only useful as a leader
|
|
|
|
s.periodicDispatcher.SetEnabled(false)
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
// Disable the Vault client as it is only useful as a leader.
|
|
|
|
s.vault.SetActive(false)
|
|
|
|
|
2017-06-28 22:35:52 +00:00
|
|
|
// Disable the deployment watcher as it is only useful as a leader.
|
2018-02-27 00:28:10 +00:00
|
|
|
s.deploymentWatcher.SetEnabled(false, nil)
|
|
|
|
|
|
|
|
// Disable the node drainer
|
|
|
|
s.nodeDrainer.SetEnabled(false, nil)
|
2017-06-28 22:35:52 +00:00
|
|
|
|
2020-04-30 13:13:00 +00:00
|
|
|
// Disable the volume watcher
|
2022-01-24 16:49:50 +00:00
|
|
|
s.volumeWatcher.SetEnabled(false, nil, "")
|
2020-04-30 13:13:00 +00:00
|
|
|
|
2017-09-07 23:56:15 +00:00
|
|
|
// Disable any enterprise systems as required.
|
|
|
|
if err := s.revokeEnterpriseLeadership(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-08-23 00:17:13 +00:00
|
|
|
// Clear the heartbeat timers on either shutdown or step down,
|
|
|
|
// since we are no longer responsible for TTL expirations.
|
|
|
|
if err := s.clearAllHeartbeatTimers(); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("clearing heartbeat timers failed", "error", err)
|
2015-08-23 00:17:13 +00:00
|
|
|
return err
|
|
|
|
}
|
2015-08-23 20:59:13 +00:00
|
|
|
|
|
|
|
// Unpause our workers if we paused them previously
|
2022-01-06 16:56:13 +00:00
|
|
|
s.handlePausableWorkers(false)
|
2020-06-01 14:57:53 +00:00
|
|
|
|
2015-07-24 04:58:38 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-06-01 14:57:53 +00:00
|
|
|
// pausableWorkers returns a slice of the workers
|
|
|
|
// to pause on leader transitions.
|
|
|
|
//
|
|
|
|
// Upon leadership establishment, 3/4 of the workers are paused to free
|
|
|
|
// CPU for Raft, the plan queue, and the evaluation broker.
|
|
|
|
func (s *Server) pausableWorkers() []*Worker {
|
|
|
|
n := len(s.workers)
|
|
|
|
if n <= 1 {
|
|
|
|
return []*Worker{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Disabling 3/4 of the workers frees CPU for raft and the
|
|
|
|
// plan applier which uses 1/2 the cores.
|
|
|
|
return s.workers[:3*n/4]
|
|
|
|
}
|
|
|
|
|
2015-06-05 21:54:45 +00:00
|
|
|
// reconcile is used to reconcile the differences between Serf
|
|
|
|
// membership and what is reflected in our strongly consistent store.
|
|
|
|
func (s *Server) reconcile() error {
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "leader", "reconcile"}, time.Now())
|
|
|
|
members := s.serf.Members()
|
|
|
|
for _, member := range members {
|
|
|
|
if err := s.reconcileMember(member); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-06-04 11:38:41 +00:00
|
|
|
// reconcileMember is used to do an async reconcile of a single serf member
|
|
|
|
func (s *Server) reconcileMember(member serf.Member) error {
|
|
|
|
// Check if this is a member we should handle
|
|
|
|
valid, parts := isNomadServer(member)
|
|
|
|
if !valid || parts.Region != s.config.Region {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "leader", "reconcileMember"}, time.Now())
|
|
|
|
|
|
|
|
var err error
|
|
|
|
switch member.Status {
|
|
|
|
case serf.StatusAlive:
|
|
|
|
err = s.addRaftPeer(member, parts)
|
|
|
|
case serf.StatusLeft, StatusReap:
|
|
|
|
err = s.removeRaftPeer(member, parts)
|
|
|
|
}
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to reconcile member", "member", member, "error", err)
|
2015-06-04 11:38:41 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// addRaftPeer is used to add a new Raft peer when a Nomad server joins
|
|
|
|
func (s *Server) addRaftPeer(m serf.Member, parts *serverParts) error {
|
|
|
|
// Check for possibility of multiple bootstrap nodes
|
2017-11-22 00:29:11 +00:00
|
|
|
members := s.serf.Members()
|
2015-06-04 11:38:41 +00:00
|
|
|
if parts.Bootstrap {
|
|
|
|
for _, member := range members {
|
|
|
|
valid, p := isNomadServer(member)
|
|
|
|
if valid && member.Name != m.Name && p.Bootstrap {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("skipping adding Raft peer because an existing peer is in bootstrap mode and only one server should be in bootstrap mode",
|
|
|
|
"existing_peer", member.Name, "joining_peer", m.Name)
|
2015-06-04 11:38:41 +00:00
|
|
|
return nil
|
|
|
|
}
|
2015-06-01 15:49:10 +00:00
|
|
|
}
|
|
|
|
}
|
2015-06-04 11:38:41 +00:00
|
|
|
|
2018-05-30 16:34:45 +00:00
|
|
|
// Processing ourselves could result in trying to remove ourselves to
|
|
|
|
// fix up our address, which would make us step down. This is only
|
|
|
|
// safe to attempt if there are multiple servers available.
|
2017-11-22 00:29:11 +00:00
|
|
|
addr := (&net.TCPAddr{IP: m.Addr, Port: parts.Port}).String()
|
2017-02-02 23:49:06 +00:00
|
|
|
configFuture := s.raft.GetConfiguration()
|
|
|
|
if err := configFuture.Error(); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to get raft configuration", "error", err)
|
2017-02-02 23:49:06 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-05-30 16:34:45 +00:00
|
|
|
|
|
|
|
if m.Name == s.config.NodeName {
|
|
|
|
if l := len(configFuture.Configuration().Servers); l < 3 {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Debug("skipping self join check for peer since the cluster is too small", "peer", m.Name)
|
2017-02-02 23:49:06 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-22 00:29:11 +00:00
|
|
|
// See if it's already in the configuration. It's harmless to re-add it
|
|
|
|
// but we want to avoid doing that if possible to prevent useless Raft
|
|
|
|
// log entries. If the address is the same but the ID changed, remove the
|
|
|
|
// old server before adding the new one.
|
2022-09-01 18:27:10 +00:00
|
|
|
minRaftProtocol, err := s.MinRaftProtocol()
|
2017-11-22 00:29:11 +00:00
|
|
|
if err != nil {
|
2015-06-04 11:38:41 +00:00
|
|
|
return err
|
|
|
|
}
|
2017-11-22 00:29:11 +00:00
|
|
|
for _, server := range configFuture.Configuration().Servers {
|
|
|
|
// No-op if the raft version is too low
|
|
|
|
if server.Address == raft.ServerAddress(addr) && (minRaftProtocol < 2 || parts.RaftVersion < 3) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the address or ID matches an existing server, see if we need to remove the old one first
|
|
|
|
if server.Address == raft.ServerAddress(addr) || server.ID == raft.ServerID(parts.ID) {
|
2018-05-30 16:34:45 +00:00
|
|
|
// Exit with no-op if this is being called on an existing server and both the ID and address match
|
2017-11-22 00:29:11 +00:00
|
|
|
if server.Address == raft.ServerAddress(addr) && server.ID == raft.ServerID(parts.ID) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
future := s.raft.RemoveServer(server.ID, 0, 0)
|
|
|
|
if server.Address == raft.ServerAddress(addr) {
|
|
|
|
if err := future.Error(); err != nil {
|
|
|
|
return fmt.Errorf("error removing server with duplicate address %q: %s", server.Address, err)
|
|
|
|
}
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Info("removed server with duplicate address", "address", server.Address)
|
2017-11-22 00:29:11 +00:00
|
|
|
} else {
|
|
|
|
if err := future.Error(); err != nil {
|
|
|
|
return fmt.Errorf("error removing server with duplicate ID %q: %s", server.ID, err)
|
|
|
|
}
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Info("removed server with duplicate ID", "id", server.ID)
|
2017-11-22 00:29:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt to add as a peer
|
|
|
|
switch {
|
|
|
|
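// With raft protocol v3 the server is first added as a non-voter; autopilot
// later promotes it to a voter once it is healthy.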
case minRaftProtocol >= 3:
|
2017-12-18 21:16:23 +00:00
|
|
|
addFuture := s.raft.AddNonvoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
|
2017-11-22 00:29:11 +00:00
|
|
|
if err := addFuture.Error(); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to add raft peer", "error", err)
|
2017-11-22 00:29:11 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
case minRaftProtocol == 2 && parts.RaftVersion >= 3:
|
|
|
|
addFuture := s.raft.AddVoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
|
|
|
|
if err := addFuture.Error(); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to add raft peer", "error", err)
|
2017-11-22 00:29:11 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
addFuture := s.raft.AddPeer(raft.ServerAddress(addr))
|
|
|
|
if err := addFuture.Error(); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to add raft peer", "error", err)
|
2017-11-22 00:29:11 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-04 11:38:41 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// removeRaftPeer is used to remove a Raft peer when a Nomad server leaves
|
|
|
|
// or is reaped
|
|
|
|
func (s *Server) removeRaftPeer(m serf.Member, parts *serverParts) error {
|
2017-02-02 23:49:06 +00:00
|
|
|
addr := (&net.TCPAddr{IP: m.Addr, Port: parts.Port}).String()
|
|
|
|
|
|
|
|
// See if it's already in the configuration. It's harmless to re-remove it
|
|
|
|
// but we want to avoid doing that if possible to prevent useless Raft
|
|
|
|
// log entries.
|
|
|
|
configFuture := s.raft.GetConfiguration()
|
|
|
|
if err := configFuture.Error(); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to get raft configuration", "error", err)
|
2017-02-02 23:49:06 +00:00
|
|
|
return err
|
|
|
|
}
|
2017-11-22 00:29:11 +00:00
|
|
|
|
2022-09-01 18:27:10 +00:00
|
|
|
minRaftProtocol, err := s.MinRaftProtocol()
|
2017-11-22 00:29:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pick which remove API to use based on how the server was added.
|
2017-02-02 23:49:06 +00:00
|
|
|
for _, server := range configFuture.Configuration().Servers {
|
2022-03-22 19:07:31 +00:00
|
|
|
// Check if this is the server to remove based on how it was registered.
|
|
|
|
// Raft v2 servers are registered by address.
|
|
|
|
// Raft v3 servers are registered by ID.
|
|
|
|
if server.ID == raft.ServerID(parts.ID) || server.Address == raft.ServerAddress(addr) {
|
|
|
|
// Use the new add/remove APIs if we understand them.
|
|
|
|
if minRaftProtocol >= 2 {
|
|
|
|
s.logger.Info("removing server by ID", "id", server.ID)
|
|
|
|
future := s.raft.RemoveServer(server.ID, 0, 0)
|
|
|
|
if err := future.Error(); err != nil {
|
|
|
|
s.logger.Error("failed to remove raft peer", "id", server.ID, "error", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// If not, use the old remove API
|
|
|
|
s.logger.Info("removing server by address", "address", server.Address)
|
|
|
|
future := s.raft.RemovePeer(raft.ServerAddress(addr))
|
|
|
|
if err := future.Error(); err != nil {
|
|
|
|
s.logger.Error("failed to remove raft peer", "address", addr, "error", err)
|
|
|
|
return err
|
|
|
|
}
|
2017-11-22 00:29:11 +00:00
|
|
|
}
|
|
|
|
break
|
2017-02-02 23:49:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-04 11:38:41 +00:00
|
|
|
return nil
|
2015-06-01 15:49:10 +00:00
|
|
|
}
|
2017-08-13 23:16:59 +00:00
|
|
|
|
|
|
|
// replicateACLPolicies is used to replicate ACL policies from
|
|
|
|
// the authoritative region to this region.
|
|
|
|
func (s *Server) replicateACLPolicies(stopCh chan struct{}) {
|
2017-08-19 22:30:01 +00:00
|
|
|
req := structs.ACLPolicyListRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2017-08-24 16:53:30 +00:00
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
AllowStale: true,
|
2017-08-19 22:30:01 +00:00
|
|
|
},
|
|
|
|
}
|
2017-08-13 23:16:59 +00:00
|
|
|
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
2018-09-15 23:42:38 +00:00
|
|
|
s.logger.Debug("starting ACL policy replication from authoritative region", "authoritative_region", req.Region)
|
2017-08-13 23:16:59 +00:00
|
|
|
|
|
|
|
START:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
// Rate limit how often we attempt replication
|
|
|
|
limiter.Wait(context.Background())
|
|
|
|
|
|
|
|
// Fetch the list of policies
|
|
|
|
var resp structs.ACLPolicyListResponse
|
2017-10-12 22:16:33 +00:00
|
|
|
req.AuthToken = s.ReplicationToken()
|
2017-08-13 23:16:59 +00:00
|
|
|
err := s.forwardRegion(s.config.AuthoritativeRegion,
|
|
|
|
"ACL.ListPolicies", &req, &resp)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to fetch policies from authoritative region", "error", err)
|
2017-08-13 23:16:59 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform a two-way diff
|
|
|
|
delete, update := diffACLPolicies(s.State(), req.MinQueryIndex, resp.Policies)
|
|
|
|
|
|
|
|
// Delete policies that should not exist
|
|
|
|
if len(delete) > 0 {
|
|
|
|
args := &structs.ACLPolicyDeleteRequest{
|
|
|
|
Names: delete,
|
|
|
|
}
|
|
|
|
_, _, err := s.raftApply(structs.ACLPolicyDeleteRequestType, args)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to delete policies", "error", err)
|
2017-08-13 23:16:59 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch any outdated policies
|
|
|
|
var fetched []*structs.ACLPolicy
|
2017-08-20 22:30:18 +00:00
|
|
|
if len(update) > 0 {
|
|
|
|
req := structs.ACLPolicySetRequest{
|
|
|
|
Names: update,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2017-08-24 16:57:14 +00:00
|
|
|
Region: s.config.AuthoritativeRegion,
|
2017-10-12 22:16:33 +00:00
|
|
|
AuthToken: s.ReplicationToken(),
|
2017-08-24 16:57:14 +00:00
|
|
|
AllowStale: true,
|
|
|
|
MinQueryIndex: resp.Index - 1,
|
2017-08-20 22:30:18 +00:00
|
|
|
},
|
2017-08-13 23:16:59 +00:00
|
|
|
}
|
2017-08-20 22:30:18 +00:00
|
|
|
var reply structs.ACLPolicySetResponse
|
|
|
|
if err := s.forwardRegion(s.config.AuthoritativeRegion,
|
|
|
|
"ACL.GetPolicies", &req, &reply); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to fetch policies from authoritative region", "error", err)
|
2017-08-13 23:16:59 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
2017-08-20 22:30:18 +00:00
|
|
|
for _, policy := range reply.Policies {
|
|
|
|
fetched = append(fetched, policy)
|
2017-08-13 23:16:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update local policies
|
|
|
|
if len(fetched) > 0 {
|
|
|
|
args := &structs.ACLPolicyUpsertRequest{
|
|
|
|
Policies: fetched,
|
|
|
|
}
|
|
|
|
_, _, err := s.raftApply(structs.ACLPolicyUpsertRequestType, args)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to update policies", "error", err)
|
2017-08-13 23:16:59 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the minimum query index, blocks until there
|
|
|
|
// is a change.
|
|
|
|
req.MinQueryIndex = resp.Index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
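// ERR_WAIT backs off for the configured replication backoff before retrying,
// unless the server is shutting down.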
ERR_WAIT:
|
|
|
|
select {
|
|
|
|
case <-time.After(s.config.ReplicationBackoff):
|
|
|
|
goto START
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// diffACLPolicies is used to perform a two-way diff between the local
|
|
|
|
// policies and the remote policies to determine which policies need to
|
|
|
|
// be deleted or updated.
|
|
|
|
func diffACLPolicies(state *state.StateStore, minIndex uint64, remoteList []*structs.ACLPolicyListStub) (delete []string, update []string) {
|
|
|
|
// Construct a set of the local and remote policies
|
2017-08-30 17:06:56 +00:00
|
|
|
local := make(map[string][]byte)
|
2017-08-13 23:16:59 +00:00
|
|
|
remote := make(map[string]struct{})
|
|
|
|
|
|
|
|
// Add all the local policies
|
|
|
|
iter, err := state.ACLPolicies(nil)
|
|
|
|
if err != nil {
|
|
|
|
panic("failed to iterate local policies")
|
|
|
|
}
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
policy := raw.(*structs.ACLPolicy)
|
2017-08-30 17:06:56 +00:00
|
|
|
local[policy.Name] = policy.Hash
|
2017-08-13 23:16:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate over the remote policies
|
|
|
|
for _, rp := range remoteList {
|
|
|
|
remote[rp.Name] = struct{}{}
|
|
|
|
|
|
|
|
// Check if the policy is missing locally
|
2017-08-30 17:06:56 +00:00
|
|
|
if localHash, ok := local[rp.Name]; !ok {
|
2017-08-13 23:16:59 +00:00
|
|
|
update = append(update, rp.Name)
|
|
|
|
|
2017-08-30 17:06:56 +00:00
|
|
|
// Check if the policy is newer remotely and there is a hash mismatch.
|
|
|
|
} else if rp.ModifyIndex > minIndex && !bytes.Equal(localHash, rp.Hash) {
|
2017-08-13 23:16:59 +00:00
|
|
|
update = append(update, rp.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if policy should be deleted
|
|
|
|
for lp := range local {
|
|
|
|
if _, ok := remote[lp]; !ok {
|
|
|
|
delete = append(delete, lp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// replicateACLTokens is used to replicate global ACL tokens from
|
|
|
|
// the authoritative region to this region.
|
|
|
|
func (s *Server) replicateACLTokens(stopCh chan struct{}) {
|
2017-08-13 23:45:13 +00:00
|
|
|
req := structs.ACLTokenListRequest{
|
|
|
|
GlobalOnly: true,
|
2017-08-19 22:30:01 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
2017-08-24 16:53:30 +00:00
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
AllowStale: true,
|
2017-08-19 22:30:01 +00:00
|
|
|
},
|
2017-08-13 23:45:13 +00:00
|
|
|
}
|
|
|
|
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
2018-09-15 23:42:38 +00:00
|
|
|
s.logger.Debug("starting ACL token replication from authoritative region", "authoritative_region", req.Region)
|
2017-08-13 23:45:13 +00:00
|
|
|
|
|
|
|
START:
|
2017-08-13 23:16:59 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
2017-08-13 23:45:13 +00:00
|
|
|
default:
|
|
|
|
// Rate limit how often we attempt replication
|
|
|
|
limiter.Wait(context.Background())
|
|
|
|
|
|
|
|
// Fetch the list of tokens
|
|
|
|
var resp structs.ACLTokenListResponse
|
2017-10-12 22:16:33 +00:00
|
|
|
req.AuthToken = s.ReplicationToken()
|
2017-08-13 23:45:13 +00:00
|
|
|
err := s.forwardRegion(s.config.AuthoritativeRegion,
|
|
|
|
"ACL.ListTokens", &req, &resp)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to fetch tokens from authoritative region", "error", err)
|
2017-08-13 23:45:13 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform a two-way diff
|
|
|
|
delete, update := diffACLTokens(s.State(), req.MinQueryIndex, resp.Tokens)
|
|
|
|
|
|
|
|
// Delete tokens that should not exist
|
|
|
|
if len(delete) > 0 {
|
|
|
|
args := &structs.ACLTokenDeleteRequest{
|
|
|
|
AccessorIDs: delete,
|
|
|
|
}
|
|
|
|
_, _, err := s.raftApply(structs.ACLTokenDeleteRequestType, args)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to delete tokens", "error", err)
|
2017-08-13 23:45:13 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-19 22:30:01 +00:00
|
|
|
// Fetch any outdated tokens.
|
2017-08-13 23:45:13 +00:00
|
|
|
var fetched []*structs.ACLToken
|
2017-08-20 22:30:18 +00:00
|
|
|
if len(update) > 0 {
|
|
|
|
req := structs.ACLTokenSetRequest{
|
|
|
|
AccessorIDS: update,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2017-08-24 16:57:14 +00:00
|
|
|
Region: s.config.AuthoritativeRegion,
|
2017-10-12 22:16:33 +00:00
|
|
|
AuthToken: s.ReplicationToken(),
|
2017-08-24 16:57:14 +00:00
|
|
|
AllowStale: true,
|
|
|
|
MinQueryIndex: resp.Index - 1,
|
2017-08-20 22:30:18 +00:00
|
|
|
},
|
2017-08-13 23:45:13 +00:00
|
|
|
}
|
2017-08-20 22:30:18 +00:00
|
|
|
var reply structs.ACLTokenSetResponse
|
|
|
|
if err := s.forwardRegion(s.config.AuthoritativeRegion,
|
|
|
|
"ACL.GetTokens", &req, &reply); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to fetch tokens from authoritative region", "error", err)
|
2017-08-13 23:45:13 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
2017-08-20 22:30:18 +00:00
|
|
|
for _, token := range reply.Tokens {
|
|
|
|
fetched = append(fetched, token)
|
2017-08-13 23:45:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-19 22:30:01 +00:00
|
|
|
// Update local tokens
|
2017-08-13 23:45:13 +00:00
|
|
|
if len(fetched) > 0 {
|
|
|
|
args := &structs.ACLTokenUpsertRequest{
|
|
|
|
Tokens: fetched,
|
|
|
|
}
|
|
|
|
_, _, err := s.raftApply(structs.ACLTokenUpsertRequestType, args)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Error("failed to update tokens", "error", err)
|
2017-08-13 23:45:13 +00:00
|
|
|
goto ERR_WAIT
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the minimum query index, blocks until there
|
|
|
|
// is a change.
|
|
|
|
req.MinQueryIndex = resp.Index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ERR_WAIT:
|
|
|
|
select {
|
|
|
|
case <-time.After(s.config.ReplicationBackoff):
|
|
|
|
goto START
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// diffACLTokens is used to perform a two-way diff between the local
|
|
|
|
// tokens and the remote tokens to determine which tokens need to
|
|
|
|
// be deleted or updated.
|
2022-03-12 00:44:52 +00:00
|
|
|
func diffACLTokens(store *state.StateStore, minIndex uint64, remoteList []*structs.ACLTokenListStub) (delete []string, update []string) {
|
2017-08-13 23:45:13 +00:00
|
|
|
// Construct a set of the local and remote tokens
|
2017-08-30 17:06:56 +00:00
|
|
|
local := make(map[string][]byte)
|
2017-08-13 23:45:13 +00:00
|
|
|
remote := make(map[string]struct{})
|
|
|
|
|
|
|
|
// Add all the local global tokens
|
2022-03-12 00:44:52 +00:00
|
|
|
iter, err := store.ACLTokensByGlobal(nil, true, state.SortDefault)
|
2017-08-13 23:45:13 +00:00
|
|
|
if err != nil {
|
|
|
|
panic("failed to iterate local tokens")
|
|
|
|
}
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
token := raw.(*structs.ACLToken)
|
2017-08-30 17:06:56 +00:00
|
|
|
local[token.AccessorID] = token.Hash
|
2017-08-13 23:45:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate over the remote tokens
|
|
|
|
for _, rp := range remoteList {
|
|
|
|
remote[rp.AccessorID] = struct{}{}
|
|
|
|
|
|
|
|
// Check if the token is missing locally
|
2017-08-30 17:06:56 +00:00
|
|
|
if localHash, ok := local[rp.AccessorID]; !ok {
|
2017-08-13 23:45:13 +00:00
|
|
|
update = append(update, rp.AccessorID)
|
|
|
|
|
2017-08-30 17:06:56 +00:00
|
|
|
// Check if the token is newer remotely and there is a hash mismatch.
|
|
|
|
} else if rp.ModifyIndex > minIndex && !bytes.Equal(localHash, rp.Hash) {
|
2017-08-13 23:45:13 +00:00
|
|
|
update = append(update, rp.AccessorID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if local token should be deleted
|
|
|
|
for lp := range local {
|
|
|
|
if _, ok := remote[lp]; !ok {
|
|
|
|
delete = append(delete, lp)
|
2017-08-13 23:16:59 +00:00
|
|
|
}
|
|
|
|
}
|
2017-08-13 23:45:13 +00:00
|
|
|
return
|
2017-08-13 23:16:59 +00:00
|
|
|
}
|
2017-12-18 21:16:23 +00:00
|
|
|
|
2022-08-22 06:54:07 +00:00
|
|
|
// replicateACLRoles is used to replicate ACL Roles from the authoritative
|
|
|
|
// region to this region. The loop should only be run on the leader within the
|
|
|
|
// federated region.
|
|
|
|
func (s *Server) replicateACLRoles(stopCh chan struct{}) {
|
|
|
|
|
|
|
|
// Generate our request object. We only need to do this once and reuse it
|
|
|
|
// for every RPC request. The MinQueryIndex is updated after every
|
|
|
|
// successful replication loop, so the next query acts as a blocking query
|
|
|
|
// and only returns upon a change in the authoritative region.
|
|
|
|
req := structs.ACLRolesListRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
AllowStale: true,
|
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create our replication rate limiter for ACL roles and log a lovely
|
|
|
|
// message to indicate the process is starting.
|
|
|
|
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
|
|
|
s.logger.Debug("starting ACL Role replication from authoritative region",
|
|
|
|
"authoritative_region", req.Region)
|
|
|
|
|
|
|
|
// Enter the main ACL Role replication loop that will only exit when the
|
|
|
|
// stopCh is closed.
|
|
|
|
//
|
|
|
|
// Any error encountered will use the replicationBackoffContinue function
|
|
|
|
// which handles replication backoff and shutdown coordination in the event
|
|
|
|
// of an error inside the loop.
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
|
|
|
|
// Rate limit how often we attempt replication. It is OK to ignore
|
|
|
|
// the error as the context will never be cancelled and the limit
|
|
|
|
// parameters are controlled internally.
|
|
|
|
_ = limiter.Wait(context.Background())
|
|
|
|
|
|
|
|
// Set the replication token on each replication iteration so that
|
|
|
|
// it is always current and can handle agent SIGHUP reloads.
|
|
|
|
req.AuthToken = s.ReplicationToken()
|
|
|
|
|
|
|
|
var resp structs.ACLRolesListResponse
|
|
|
|
|
|
|
|
// Make the list RPC request to the authoritative region, so we
|
|
|
|
// capture the latest ACL role listing.
|
|
|
|
err := s.forwardRegion(s.config.AuthoritativeRegion, structs.ACLListRolesRPCMethod, &req, &resp)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to fetch ACL Roles from authoritative region", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform a two-way diff on the ACL roles.
|
|
|
|
toDelete, toUpdate := diffACLRoles(s.State(), req.MinQueryIndex, resp.ACLRoles)
|
|
|
|
|
|
|
|
// A significant amount of time could pass between the last check
|
|
|
|
// on whether we should stop the replication process. Therefore, do
|
|
|
|
// a check here, before calling Raft.
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have ACL roles to delete, make this call directly to Raft.
|
|
|
|
if len(toDelete) > 0 {
|
|
|
|
args := structs.ACLRolesDeleteByIDRequest{ACLRoleIDs: toDelete}
|
|
|
|
_, _, err := s.raftApply(structs.ACLRolesDeleteByIDRequestType, &args)
|
|
|
|
|
|
|
|
// If the error was because we lost leadership while calling
|
|
|
|
// Raft, avoid logging as this can be confusing to operators.
|
|
|
|
if err != nil {
|
|
|
|
if err != raft.ErrLeadershipLost {
|
|
|
|
s.logger.Error("failed to delete ACL roles", "error", err)
|
|
|
|
}
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch any outdated ACL roles.
|
|
|
|
var fetched []*structs.ACLRole
|
|
|
|
if len(toUpdate) > 0 {
|
|
|
|
req := structs.ACLRolesByIDRequest{
|
|
|
|
ACLRoleIDs: toUpdate,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
AuthToken: s.ReplicationToken(),
|
|
|
|
AllowStale: true,
|
|
|
|
MinQueryIndex: resp.Index - 1,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var reply structs.ACLRolesByIDResponse
|
|
|
|
if err := s.forwardRegion(s.config.AuthoritativeRegion, structs.ACLGetRolesByIDRPCMethod, &req, &reply); err != nil {
|
|
|
|
s.logger.Error("failed to fetch ACL Roles from authoritative region", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, aclRole := range reply.ACLRoles {
|
|
|
|
fetched = append(fetched, aclRole)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update local ACL roles
|
|
|
|
if len(fetched) > 0 {
|
|
|
|
|
|
|
|
// The replication of ACL roles and policies is independent,
|
|
|
|
// therefore we cannot ensure the policies linked within the
|
|
|
|
// role are present. We must set allow missing to true.
|
|
|
|
args := structs.ACLRolesUpsertRequest{
|
|
|
|
ACLRoles: fetched,
|
|
|
|
AllowMissingPolicies: true,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform the upsert directly via Raft.
|
|
|
|
_, _, err := s.raftApply(structs.ACLRolesUpsertRequestType, &args)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to update ACL roles", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the minimum query index, blocks until there is a change.
|
|
|
|
req.MinQueryIndex = resp.Index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// diffACLRoles is used to perform a two-way diff between the local ACL Roles
|
|
|
|
// and the remote roles to determine which roles need to be deleted or
|
|
|
|
// updated. The returned arrays contain ACL role IDs.
|
|
|
|
func diffACLRoles(
|
2022-08-22 15:20:23 +00:00
|
|
|
store *state.StateStore, minIndex uint64, remoteList []*structs.ACLRoleListStub) (
|
2022-08-22 06:54:07 +00:00
|
|
|
delete []string, update []string) {
|
|
|
|
|
|
|
|
// The local ACL role tracking is keyed by the role ID and the value is the
|
|
|
|
// hash of the role.
|
|
|
|
local := make(map[string][]byte)
|
|
|
|
|
|
|
|
// The remote ACL role tracking is keyed by the role ID; the value is an
|
|
|
|
// empty struct as we already have the full object.
|
|
|
|
remote := make(map[string]struct{})
|
|
|
|
|
|
|
|
// Read all the ACL roles currently held within our local state. This panic
|
|
|
|
// will only happen if a developer has made a mistake when naming the index
|
|
|
|
// to use.
|
|
|
|
iter, err := store.GetACLRoles(nil)
|
|
|
|
if err != nil {
|
|
|
|
panic(fmt.Sprintf("failed to iterate local ACL roles: %v", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate the local ACL roles and add them to our tracking of local roles.
|
|
|
|
for raw := iter.Next(); raw != nil; raw = iter.Next() {
|
|
|
|
aclRole := raw.(*structs.ACLRole)
|
|
|
|
local[aclRole.ID] = aclRole.Hash
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate over the remote ACL roles.
|
|
|
|
for _, remoteACLRole := range remoteList {
|
|
|
|
remote[remoteACLRole.ID] = struct{}{}
|
|
|
|
|
|
|
|
// Identify whether the ACL role is within the local state. If it is
|
|
|
|
// not, add this to our update list.
|
|
|
|
if localHash, ok := local[remoteACLRole.ID]; !ok {
|
|
|
|
update = append(update, remoteACLRole.ID)
|
|
|
|
|
|
|
|
// Check if ACL role is newer remotely and there is a hash
|
|
|
|
// mismatch.
|
|
|
|
} else if remoteACLRole.ModifyIndex > minIndex && !bytes.Equal(localHash, remoteACLRole.Hash) {
|
|
|
|
update = append(update, remoteACLRole.ID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have ACL roles within state which are no longer present in the
|
|
|
|
// authoritative region we should delete them.
|
|
|
|
for localACLRole := range local {
|
|
|
|
if _, ok := remote[localACLRole]; !ok {
|
|
|
|
delete = append(delete, localACLRole)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-11-28 08:20:24 +00:00
|
|
|
// replicateACLAuthMethods is used to replicate ACL Authentication Methods from
|
|
|
|
// the authoritative region to this region. The loop should only be run on the
|
|
|
|
// leader within the federated region.
|
|
|
|
func (s *Server) replicateACLAuthMethods(stopCh chan struct{}) {
|
|
|
|
|
|
|
|
// Generate our request object. We only need to do this once and reuse it
|
|
|
|
// for every RPC request. The MinQueryIndex is updated after every
|
|
|
|
// successful replication loop, so the next query acts as a blocking query
|
|
|
|
// and only returns upon a change in the authoritative region.
|
|
|
|
req := structs.ACLAuthMethodListRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
AllowStale: true,
|
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create our replication rate limiter for ACL auth-methods and log a
|
|
|
|
// lovely message to indicate the process is starting.
|
|
|
|
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
|
|
|
s.logger.Debug("starting ACL Auth-Methods replication from authoritative region",
|
|
|
|
"authoritative_region", req.Region)
|
|
|
|
|
|
|
|
// Enter the main ACL auth-methods replication loop that will only exit
|
|
|
|
// when the stopCh is closed.
|
|
|
|
//
|
|
|
|
// Any error encountered will use the replicationBackoffContinue function
|
|
|
|
// which handles replication backoff and shutdown coordination in the event
|
|
|
|
// of an error inside the loop.
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
|
|
|
|
// Rate limit how often we attempt replication. It is OK to ignore
|
|
|
|
// the error as the context will never be cancelled and the limit
|
|
|
|
// parameters are controlled internally.
|
|
|
|
_ = limiter.Wait(context.Background())
|
|
|
|
|
|
|
|
// Set the replication token on each replication iteration so that
|
|
|
|
// it is always current and can handle agent SIGHUP reloads.
|
|
|
|
req.AuthToken = s.ReplicationToken()
|
|
|
|
|
|
|
|
var resp structs.ACLAuthMethodListResponse
|
|
|
|
|
|
|
|
// Make the list RPC request to the authoritative region, so we
|
|
|
|
// capture the latest ACL auth-method listing.
|
|
|
|
err := s.forwardRegion(s.config.AuthoritativeRegion, structs.ACLListAuthMethodsRPCMethod, &req, &resp)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to fetch ACL auth-methods from authoritative region", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform a two-way diff on the ACL auth-methods.
|
|
|
|
toDelete, toUpdate := diffACLAuthMethods(s.State(), req.MinQueryIndex, resp.AuthMethods)
|
|
|
|
|
|
|
|
// A significant amount of time could pass between the last check
|
|
|
|
// on whether we should stop the replication process. Therefore, do
|
|
|
|
// a check here, before calling Raft.
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have ACL auth-methods to delete, make this call directly
|
|
|
|
// to Raft.
|
|
|
|
if len(toDelete) > 0 {
|
|
|
|
args := structs.ACLAuthMethodDeleteRequest{Names: toDelete}
|
|
|
|
_, _, err := s.raftApply(structs.ACLAuthMethodsDeleteRequestType, &args)
|
|
|
|
|
|
|
|
// If the error was because we lost leadership while calling
|
|
|
|
// Raft, avoid logging as this can be confusing to operators.
|
|
|
|
if err != nil {
|
|
|
|
if err != raft.ErrLeadershipLost {
|
|
|
|
s.logger.Error("failed to delete ACL auth-methods", "error", err)
|
|
|
|
}
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch any outdated auth-methods.
|
|
|
|
var fetched []*structs.ACLAuthMethod
|
|
|
|
if len(toUpdate) > 0 {
|
|
|
|
req := structs.ACLAuthMethodsGetRequest{
|
|
|
|
Names: toUpdate,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
AuthToken: s.ReplicationToken(),
|
|
|
|
AllowStale: true,
|
|
|
|
MinQueryIndex: resp.Index - 1,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var reply structs.ACLAuthMethodsGetResponse
|
|
|
|
if err := s.forwardRegion(s.config.AuthoritativeRegion, structs.ACLGetAuthMethodsRPCMethod, &req, &reply); err != nil {
|
|
|
|
s.logger.Error("failed to fetch ACL auth-methods from authoritative region", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, aclAuthMethod := range reply.AuthMethods {
|
|
|
|
fetched = append(fetched, aclAuthMethod)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update local auth-methods.
|
|
|
|
if len(fetched) > 0 {
|
|
|
|
args := structs.ACLAuthMethodUpsertRequest{
|
|
|
|
AuthMethods: fetched,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform the upsert directly via Raft.
|
|
|
|
_, _, err := s.raftApply(structs.ACLAuthMethodsUpsertRequestType, &args)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to update ACL auth-methods", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the minimum query index, blocks until there is a change.
|
|
|
|
req.MinQueryIndex = resp.Index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// diffACLAuthMethods is used to perform a two-way diff between the local ACL
|
|
|
|
// auth-methods and the remote auth-methods to determine which ones need to be
|
|
|
|
// deleted or updated. The returned arrays contain ACL auth-method names.
|
|
|
|
func diffACLAuthMethods(
|
|
|
|
store *state.StateStore, minIndex uint64, remoteList []*structs.ACLAuthMethodStub) (
|
|
|
|
delete []string, update []string) {
|
|
|
|
|
|
|
|
// The local ACL auth-method tracking is keyed by the name and the value is
|
|
|
|
// the hash of the auth-method.
|
|
|
|
local := make(map[string][]byte)
|
|
|
|
|
|
|
|
// The remote ACL auth-method tracking is keyed by the name; the value is
|
|
|
|
// an empty struct as we already have the full object.
|
|
|
|
remote := make(map[string]struct{})
|
|
|
|
|
|
|
|
// Read all the ACL auth-methods currently held within our local state.
|
|
|
|
// This panic will only happen if a developer has made a mistake when naming
|
|
|
|
// the index to use.
|
|
|
|
iter, err := store.GetACLAuthMethods(nil)
|
|
|
|
if err != nil {
|
|
|
|
panic(fmt.Sprintf("failed to iterate local ACL roles: %v", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate the local ACL auth-methods and add them to our tracking of
|
|
|
|
// local auth-methods
|
|
|
|
for raw := iter.Next(); raw != nil; raw = iter.Next() {
|
|
|
|
aclAuthMethod := raw.(*structs.ACLAuthMethod)
|
|
|
|
local[aclAuthMethod.Name] = aclAuthMethod.Hash
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate over the remote ACL auth-methods.
|
|
|
|
for _, remoteACLAuthMethod := range remoteList {
|
|
|
|
remote[remoteACLAuthMethod.Name] = struct{}{}
|
|
|
|
|
|
|
|
// Identify whether the ACL auth-method is within the local state. If
|
|
|
|
// it is not, add this to our update list.
|
|
|
|
if localHash, ok := local[remoteACLAuthMethod.Name]; !ok {
|
|
|
|
update = append(update, remoteACLAuthMethod.Name)
|
|
|
|
|
|
|
|
// Check if ACL auth-method is newer remotely and there is a hash
|
|
|
|
// mismatch.
|
|
|
|
} else if remoteACLAuthMethod.ModifyIndex > minIndex && !bytes.Equal(localHash, remoteACLAuthMethod.Hash) {
|
|
|
|
update = append(update, remoteACLAuthMethod.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have ACL auth-methods within state which are no longer present in
|
|
|
|
// the authoritative region we should delete them.
|
|
|
|
for localACLAuthMethod := range local {
|
|
|
|
if _, ok := remote[localACLAuthMethod]; !ok {
|
|
|
|
delete = append(delete, localACLAuthMethod)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-12-16 08:08:00 +00:00
|
|
|
// replicateACLBindingRules is used to replicate ACL binding rules from the
|
|
|
|
// authoritative region to this region. The loop should only be run on the
|
|
|
|
// leader within the federated region.
|
|
|
|
func (s *Server) replicateACLBindingRules(stopCh chan struct{}) {
|
|
|
|
|
|
|
|
// Generate our request object. We only need to do this once and reuse it
|
|
|
|
// for every RPC request. The MinQueryIndex is updated after every
|
|
|
|
// successful replication loop, so the next query acts as a blocking query
|
|
|
|
// and only returns upon a change in the authoritative region.
|
|
|
|
req := structs.ACLBindingRulesListRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
AllowStale: true,
|
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create our replication rate limiter for ACL binding rules and log a
|
|
|
|
// lovely message to indicate the process is starting.
|
|
|
|
limiter := rate.NewLimiter(replicationRateLimit, int(replicationRateLimit))
|
|
|
|
s.logger.Debug("starting ACL Binding Rules replication from authoritative region",
|
|
|
|
"authoritative_region", req.Region)
|
|
|
|
|
|
|
|
// Enter the main ACL binding rules replication loop that will only exit
|
|
|
|
// when the stopCh is closed.
|
|
|
|
//
|
|
|
|
// Any error encountered will use the replicationBackoffContinue function
|
|
|
|
// which handles replication backoff and shutdown coordination in the event
|
|
|
|
// of an error inside the loop.
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
|
|
|
|
// Rate limit how often we attempt replication. It is OK to ignore
|
|
|
|
// the error as the context will never be cancelled and the limit
|
|
|
|
// parameters are controlled internally.
|
|
|
|
_ = limiter.Wait(context.Background())
|
|
|
|
|
|
|
|
// Set the replication token on each replication iteration so that
|
|
|
|
// it is always current and can handle agent SIGHUP reloads.
|
|
|
|
req.AuthToken = s.ReplicationToken()
|
|
|
|
|
|
|
|
var resp structs.ACLBindingRulesListResponse
|
|
|
|
|
|
|
|
// Make the list RPC request to the authoritative region, so we
|
|
|
|
// capture the latest ACL binding rules listing.
|
|
|
|
err := s.forwardRegion(s.config.AuthoritativeRegion, structs.ACLListBindingRulesRPCMethod, &req, &resp)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to fetch ACL binding rules from authoritative region", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform a two-way diff on the ACL binding rules.
|
|
|
|
toDelete, toUpdate := diffACLBindingRules(s.State(), req.MinQueryIndex, resp.ACLBindingRules)
|
|
|
|
|
|
|
|
// A significant amount of time could pass between the last check
|
|
|
|
// on whether we should stop the replication process. Therefore, do
|
|
|
|
// a check here, before calling Raft.
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have ACL binding rules to delete, make this call directly
|
|
|
|
// to Raft.
|
|
|
|
if len(toDelete) > 0 {
|
|
|
|
args := structs.ACLBindingRulesDeleteRequest{ACLBindingRuleIDs: toDelete}
|
|
|
|
_, _, err := s.raftApply(structs.ACLBindingRulesDeleteRequestType, &args)
|
|
|
|
|
|
|
|
// If the error was because we lost leadership while calling
|
|
|
|
// Raft, avoid logging as this can be confusing to operators.
|
|
|
|
if err != nil {
|
|
|
|
if err != raft.ErrLeadershipLost {
|
|
|
|
s.logger.Error("failed to delete ACL binding rules", "error", err)
|
|
|
|
}
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch any outdated binding rules.
|
|
|
|
var fetched []*structs.ACLBindingRule
|
|
|
|
if len(toUpdate) > 0 {
|
|
|
|
req := structs.ACLBindingRulesRequest{
|
|
|
|
ACLBindingRuleIDs: toUpdate,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: s.config.AuthoritativeRegion,
|
|
|
|
AuthToken: s.ReplicationToken(),
|
|
|
|
AllowStale: true,
|
|
|
|
MinQueryIndex: resp.Index - 1,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var reply structs.ACLBindingRulesResponse
|
|
|
|
if err := s.forwardRegion(s.config.AuthoritativeRegion, structs.ACLGetBindingRulesRPCMethod, &req, &reply); err != nil {
|
|
|
|
s.logger.Error("failed to fetch ACL binding rules from authoritative region", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, aclBindingRule := range reply.ACLBindingRules {
|
|
|
|
fetched = append(fetched, aclBindingRule)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update local binding rules.
|
|
|
|
if len(fetched) > 0 {
|
|
|
|
args := structs.ACLBindingRulesUpsertRequest{
|
|
|
|
ACLBindingRules: fetched,
|
|
|
|
AllowMissingAuthMethods: true,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform the upsert directly via Raft.
|
|
|
|
_, _, err := s.raftApply(structs.ACLBindingRulesUpsertRequestType, &args)
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to update ACL binding rules", "error", err)
|
|
|
|
if s.replicationBackoffContinue(stopCh) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the minimum query index, blocks until there is a change.
|
|
|
|
req.MinQueryIndex = resp.Index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// diffACLBindingRules is used to perform a two-way diff between the local ACL
|
|
|
|
// binding rules and the remote binding rules to determine which ones need to be
|
|
|
|
// deleted or updated. The returned arrays contain ACL binding rule IDs.
|
|
|
|
func diffACLBindingRules(
|
|
|
|
store *state.StateStore, minIndex uint64, remoteList []*structs.ACLBindingRuleListStub) (
|
|
|
|
delete []string, update []string) {
|
|
|
|
|
|
|
|
// The local ACL binding rule tracking is keyed by the rule ID and the value
|
|
|
|
// is the hash of the binding rule.
|
|
|
|
local := make(map[string][]byte)
|
|
|
|
|
|
|
|
// The remote ACL binding rule tracking is keyed by the rule ID; the value is
|
|
|
|
// an empty struct as we already have the full object.
|
|
|
|
remote := make(map[string]struct{})
|
|
|
|
|
|
|
|
// Read all the ACL binding rules currently held within our local state.
|
|
|
|
// This panic will only happen if a developer has made a mistake when naming
|
|
|
|
// the index to use.
|
|
|
|
iter, err := store.GetACLBindingRules(nil)
|
|
|
|
if err != nil {
|
|
|
|
panic(fmt.Sprintf("failed to iterate local ACL binding rules: %v", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate the local ACL binding rules and add them to our tracking of
|
|
|
|
// local binding rules.
|
|
|
|
for raw := iter.Next(); raw != nil; raw = iter.Next() {
|
|
|
|
aclBindingRule := raw.(*structs.ACLBindingRule)
|
|
|
|
local[aclBindingRule.ID] = aclBindingRule.Hash
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate over the remote ACL binding rules.
|
|
|
|
for _, remoteACLBindingRule := range remoteList {
|
|
|
|
remote[remoteACLBindingRule.ID] = struct{}{}
|
|
|
|
|
|
|
|
// Identify whether the ACL binding rule is within the local state. If
|
|
|
|
// it is not, add this to our update list.
|
|
|
|
if localHash, ok := local[remoteACLBindingRule.ID]; !ok {
|
|
|
|
update = append(update, remoteACLBindingRule.ID)
|
|
|
|
|
|
|
|
// Check if the ACL binding rule is newer remotely and there is a
|
|
|
|
// hash mismatch.
|
|
|
|
} else if remoteACLBindingRule.ModifyIndex > minIndex && !bytes.Equal(localHash, remoteACLBindingRule.Hash) {
|
|
|
|
update = append(update, remoteACLBindingRule.ID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have ACL binding rules within state which are no longer present in
|
|
|
|
// the authoritative region we should delete them.
|
|
|
|
for localACLBindingRules := range local {
|
|
|
|
if _, ok := remote[localACLBindingRules]; !ok {
|
|
|
|
delete = append(delete, localACLBindingRules)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-11-28 08:20:24 +00:00
|
|
|
// replicationBackoffContinue should be used when a replication loop encounters
|
|
|
|
// an error and wants to wait until either the backoff time has been met, or
|
|
|
|
// the stopCh has been closed. The boolean indicates whether the replication
|
|
|
|
// process should continue.
|
|
|
|
//
|
|
|
|
// Typical use:
|
|
|
|
//
|
|
|
|
// if s.replicationBackoffContinue(stopCh) {
|
|
|
|
// continue
|
|
|
|
// } else {
|
|
|
|
// return
|
|
|
|
// }
|
|
|
|
func (s *Server) replicationBackoffContinue(stopCh chan struct{}) bool {
|
|
|
|
|
|
|
|
timer, timerStopFn := helper.NewSafeTimer(s.config.ReplicationBackoff)
|
|
|
|
defer timerStopFn()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-timer.C:
|
|
|
|
return true
|
|
|
|
case <-stopCh:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-18 21:16:23 +00:00
|
|
|
// getOrCreateAutopilotConfig is used to get the autopilot config, initializing it if necessary
|
2018-01-30 03:53:34 +00:00
|
|
|
func (s *Server) getOrCreateAutopilotConfig() *structs.AutopilotConfig {
|
2017-12-18 21:16:23 +00:00
|
|
|
state := s.fsm.State()
|
|
|
|
_, config, err := state.AutopilotConfig()
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Named("autopilot").Error("failed to get autopilot config", "error", err)
|
2017-12-18 21:16:23 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if config != nil {
|
|
|
|
return config
|
|
|
|
}
|
|
|
|
|
2022-10-17 20:23:51 +00:00
|
|
|
if !ServersMeetMinimumVersion(s.Members(), AllRegions, minAutopilotVersion, false) {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Named("autopilot").Warn("can't initialize until all servers are above minimum version", "min_version", minAutopilotVersion)
|
2017-12-18 21:16:23 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
config = s.config.AutopilotConfig
|
|
|
|
req := structs.AutopilotSetConfigRequest{Config: *config}
|
|
|
|
if _, _, err = s.raftApply(structs.AutopilotRequestType, req); err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
s.logger.Named("autopilot").Error("failed to initialize config", "error", err)
|
2017-12-18 21:16:23 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return config
|
|
|
|
}
|
2018-09-28 04:27:38 +00:00
|
|
|
|
2018-11-01 22:05:17 +00:00
|
|
|
// getOrCreateSchedulerConfig is used to get the scheduler config. We create a default
|
|
|
|
// config if it doesn't already exist, for bootstrapping an empty cluster.
|
2018-09-28 04:27:38 +00:00
|
|
|
func (s *Server) getOrCreateSchedulerConfig() *structs.SchedulerConfiguration {
|
|
|
|
state := s.fsm.State()
|
|
|
|
_, config, err := state.SchedulerConfig()
|
|
|
|
if err != nil {
|
|
|
|
s.logger.Named("core").Error("failed to get scheduler config", "error", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if config != nil {
|
|
|
|
return config
|
|
|
|
}
|
2022-10-17 20:23:51 +00:00
|
|
|
if !ServersMeetMinimumVersion(s.Members(), s.Region(), minSchedulerConfigVersion, false) {
|
2019-01-29 19:48:45 +00:00
|
|
|
s.logger.Named("core").Warn("can't initialize scheduler config until all servers are above minimum version", "min_version", minSchedulerConfigVersion)
|
2019-01-29 18:47:42 +00:00
|
|
|
return nil
|
|
|
|
}
|
2018-09-28 04:27:38 +00:00
|
|
|
|
2020-01-28 16:09:36 +00:00
|
|
|
req := structs.SchedulerSetConfigRequest{Config: s.config.DefaultSchedulerConfig}
|
2018-09-28 04:27:38 +00:00
|
|
|
if _, _, err = s.raftApply(structs.SchedulerConfigRequestType, req); err != nil {
|
|
|
|
s.logger.Named("core").Error("failed to initialize config", "error", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return config
|
|
|
|
}
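// For illustration only, the bootstrap default applied above might resemble
// the following literal; the real values come from
// s.config.DefaultSchedulerConfig, so the field set here is an assumption:
//
//	defaultConfig := structs.SchedulerConfiguration{
//		SchedulerAlgorithm: structs.SchedulerAlgorithmBinpack, // assumed default algorithm
//		PauseEvalBroker:    false,                             // brokers start enabled
//	}
//	req := structs.SchedulerSetConfigRequest{Config: defaultConfig}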
|
2019-11-14 13:18:29 +00:00
|
|
|
|
2022-10-06 16:47:02 +00:00
|
|
|
var minVersionKeyring = version.Must(version.NewVersion("1.4.0"))
|
|
|
|
|
2022-05-31 12:43:51 +00:00
|
|
|
// initializeKeyring creates the first root key if the leader doesn't
|
|
|
|
// already have one. The metadata will be replicated via raft and then
|
|
|
|
// the followers will get the key material from their own key
|
|
|
|
// replication.
|
2022-10-06 16:47:02 +00:00
|
|
|
func (s *Server) initializeKeyring(stopCh <-chan struct{}) {
|
|
|
|
|
|
|
|
logger := s.logger.Named("keyring")
|
2022-05-31 12:43:51 +00:00
|
|
|
|
|
|
|
store := s.fsm.State()
|
|
|
|
keyMeta, err := store.GetActiveRootKeyMeta(nil)
|
|
|
|
if err != nil {
|
2022-10-06 16:47:02 +00:00
|
|
|
logger.Error("failed to get active key: %v", err)
|
|
|
|
return
|
2022-05-31 12:43:51 +00:00
|
|
|
}
|
|
|
|
if keyMeta != nil {
|
2022-10-06 16:47:02 +00:00
|
|
|
return
|
2022-05-31 12:43:51 +00:00
|
|
|
}
|
|
|
|
|
2022-10-06 16:47:02 +00:00
|
|
|
logger.Trace("verifying cluster is ready to initialize keyring")
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
2022-10-17 17:21:16 +00:00
|
|
|
|
2022-10-17 20:23:51 +00:00
|
|
|
if ServersMeetMinimumVersion(s.serf.Members(), s.Region(), minVersionKeyring, true) {
|
2022-10-06 16:47:02 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2022-10-21 16:33:16 +00:00
|
|
|
// we might have lost leadership during the version check
|
2022-10-06 16:47:02 +00:00
|
|
|
if !s.IsLeader() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Trace("initializing keyring")
|
2022-05-31 12:43:51 +00:00
|
|
|
|
2022-06-02 17:41:59 +00:00
|
|
|
rootKey, err := structs.NewRootKey(structs.EncryptionAlgorithmAES256GCM)
|
2022-05-31 12:43:51 +00:00
|
|
|
if err != nil {
|
2022-10-06 16:47:02 +00:00
|
|
|
logger.Error("could not initialize keyring", "error", err)
|
|
|
|
return
|
2022-05-31 12:43:51 +00:00
|
|
|
}
|
2022-07-07 17:48:38 +00:00
|
|
|
rootKey.Meta.SetActive()
|
|
|
|
|
2022-06-07 12:40:12 +00:00
|
|
|
err = s.encrypter.AddKey(rootKey)
|
2022-05-31 12:43:51 +00:00
|
|
|
if err != nil {
|
2022-10-06 16:47:02 +00:00
|
|
|
logger.Error("could not add initial key to keyring: %v", err)
|
|
|
|
return
|
2022-05-31 12:43:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if _, _, err = s.raftApply(structs.RootKeyMetaUpsertRequestType,
|
|
|
|
structs.KeyringUpdateRootKeyMetaRequest{
|
|
|
|
RootKeyMeta: rootKey.Meta,
|
|
|
|
}); err != nil {
|
2022-10-06 16:47:02 +00:00
|
|
|
logger.Error("could not initialize keyring: %v", err)
|
|
|
|
return
|
2022-05-31 12:43:51 +00:00
|
|
|
}
|
|
|
|
|
2022-10-06 16:47:02 +00:00
|
|
|
logger.Info("initialized keyring", "id", rootKey.Meta.KeyID)
|
2022-05-31 12:43:51 +00:00
|
|
|
}
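// A minimal sketch of the assumed call site: because the version-gate loop
// above can block, the function takes a stopCh and is expected to run in its
// own goroutine when leadership is established (this wiring is an assumption):
//
//	go s.initializeKeyring(stopCh)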
|
|
|
|
|
2019-11-14 13:18:29 +00:00
|
|
|
func (s *Server) generateClusterID() (string, error) {
|
2022-10-17 20:23:51 +00:00
|
|
|
if !ServersMeetMinimumVersion(s.Members(), AllRegions, minClusterIDVersion, false) {
|
2019-11-14 13:18:29 +00:00
|
|
|
s.logger.Named("core").Warn("cannot initialize cluster ID until all servers are above minimum version", "min_version", minClusterIDVersion)
|
2022-04-02 00:24:02 +00:00
|
|
|
return "", fmt.Errorf("cluster ID cannot be created until all servers are above minimum version %s", minClusterIDVersion)
|
2019-11-14 13:18:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
newMeta := structs.ClusterMetadata{ClusterID: uuid.Generate(), CreateTime: time.Now().UnixNano()}
|
|
|
|
if _, _, err := s.raftApply(structs.ClusterMetadataRequestType, newMeta); err != nil {
|
|
|
|
s.logger.Named("core").Error("failed to create cluster ID", "error", err)
|
2022-04-02 00:24:02 +00:00
|
|
|
return "", fmt.Errorf("failed to create cluster ID: %w", err)
|
2019-11-14 13:18:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
s.logger.Named("core").Info("established cluster id", "cluster_id", newMeta.ClusterID, "create_time", newMeta.CreateTime)
|
|
|
|
return newMeta.ClusterID, nil
|
|
|
|
}
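// A minimal sketch of an assumed get-or-create caller; the real caller checks
// the FSM for existing cluster metadata before generating a new ID (accessor
// name and signature here are assumptions):
//
//	meta, err := s.fsm.State().ClusterMetadata(nil) // assumed state accessor
//	if err == nil && meta != nil {
//		return meta.ClusterID, nil
//	}
//	return s.generateClusterID()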
|
2022-07-06 14:13:48 +00:00
|
|
|
|
|
|
|
// handleEvalBrokerStateChange handles changing the evalBroker and blockedEvals
|
|
|
|
// enabled status based on the passed scheduler configuration. The boolean
|
|
|
|
// response indicates whether the caller needs to call restoreEvals() due to
|
|
|
|
// the brokers being enabled. It is for use when the change must take the
|
|
|
|
// scheduler configuration into account. This is not needed when calling
|
|
|
|
// revokeLeadership, as the configuration doesn't matter, and we need to ensure
|
|
|
|
// the brokers are stopped.
|
|
|
|
//
|
|
|
|
// The function checks the server is the leader and uses a mutex to avoid any
|
|
|
|
// potential timing problems. Consider the following sequence:
|
2022-08-22 06:54:07 +00:00
|
|
|
// - operator updates the configuration via the API
|
|
|
|
// - the RPC handler applies the change via Raft
|
|
|
|
// - leadership transitions with write barrier
|
|
|
|
// - the RPC handler calls this function to enact the change
|
2022-07-06 14:13:48 +00:00
|
|
|
//
|
|
|
|
// The mutex also protects against a situation where leadership is revoked
|
|
|
|
// while this function is being called, ensuring the correct series of actions
|
|
|
|
// occurs so that state stays consistent.
|
|
|
|
func (s *Server) handleEvalBrokerStateChange(schedConfig *structs.SchedulerConfiguration) bool {
|
|
|
|
|
|
|
|
// Grab the lock first. Once we have this we can be sure to run everything
|
|
|
|
// needed before any leader transition can attempt to modify the state.
|
|
|
|
s.brokerLock.Lock()
|
|
|
|
defer s.brokerLock.Unlock()
|
|
|
|
|
|
|
|
// If we are no longer the leader, exit early.
|
|
|
|
if !s.IsLeader() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// enableBrokers tracks whether the evalBroker and blockedEvals processes
|
|
|
|
// should be enabled or not. It allows us to answer this question using
|
|
|
|
// either a persisted Raft configuration or the default bootstrap config.
|
|
|
|
var enableBrokers, restoreEvals bool
|
|
|
|
|
|
|
|
// The scheduler config can only be persisted to Raft once quorum has been
|
|
|
|
// established. If this is a fresh cluster, we need to use the default
|
|
|
|
// scheduler config, otherwise we can use the persisted object.
|
|
|
|
switch schedConfig {
|
|
|
|
case nil:
|
|
|
|
enableBrokers = !s.config.DefaultSchedulerConfig.PauseEvalBroker
|
|
|
|
default:
|
|
|
|
enableBrokers = !schedConfig.PauseEvalBroker
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the evalBroker status is changing, set the new state.
|
|
|
|
if enableBrokers != s.evalBroker.Enabled() {
|
|
|
|
s.logger.Info("eval broker status modified", "paused", !enableBrokers)
|
|
|
|
s.evalBroker.SetEnabled(enableBrokers)
|
|
|
|
restoreEvals = enableBrokers
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the blockedEvals status is changing, set the new state.
|
|
|
|
if enableBrokers != s.blockedEvals.Enabled() {
|
|
|
|
s.logger.Info("blocked evals status modified", "paused", !enableBrokers)
|
|
|
|
s.blockedEvals.SetEnabled(enableBrokers)
|
|
|
|
restoreEvals = enableBrokers
|
|
|
|
|
|
|
|
if enableBrokers {
|
|
|
|
s.blockedEvals.SetTimetable(s.fsm.TimeTable())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return restoreEvals
|
|
|
|
}
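// A minimal sketch of the caller pattern described above, assuming it runs
// while establishing leadership (the wiring is an assumption; restoreEvals is
// the restore step named in the function comment):
//
//	schedConfig := s.getOrCreateSchedulerConfig()
//	if s.handleEvalBrokerStateChange(schedConfig) {
//		if err := s.restoreEvals(); err != nil {
//			return err
//		}
//	}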
|