2014-01-09 23:49:09 +00:00
|
|
|
package consul
|
|
|
|
|
|
|
|
import (
|
2018-10-19 16:04:07 +00:00
|
|
|
"context"
|
2014-08-11 21:54:18 +00:00
|
|
|
"fmt"
|
2014-06-16 21:36:12 +00:00
|
|
|
"net"
|
2020-06-17 10:16:13 +00:00
|
|
|
"reflect"
|
2014-06-16 21:36:12 +00:00
|
|
|
"strconv"
|
2017-04-13 21:17:32 +00:00
|
|
|
"sync"
|
2018-10-19 16:04:07 +00:00
|
|
|
"sync/atomic"
|
2014-06-16 21:36:12 +00:00
|
|
|
"time"
|
|
|
|
|
2019-07-30 21:47:39 +00:00
|
|
|
"github.com/armon/go-metrics"
|
2020-11-14 00:26:08 +00:00
|
|
|
"github.com/armon/go-metrics/prometheus"
|
2020-01-28 23:50:41 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2019-07-30 21:47:39 +00:00
|
|
|
"github.com/hashicorp/go-uuid"
|
|
|
|
"github.com/hashicorp/go-version"
|
2014-01-10 20:55:55 +00:00
|
|
|
"github.com/hashicorp/raft"
|
2014-01-09 23:49:09 +00:00
|
|
|
"github.com/hashicorp/serf/serf"
|
2018-10-19 16:04:07 +00:00
|
|
|
"golang.org/x/time/rate"
|
2021-04-08 22:58:15 +00:00
|
|
|
|
|
|
|
"github.com/hashicorp/consul/acl"
|
|
|
|
"github.com/hashicorp/consul/agent/metadata"
|
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
|
|
|
"github.com/hashicorp/consul/api"
|
|
|
|
"github.com/hashicorp/consul/lib"
|
|
|
|
"github.com/hashicorp/consul/logging"
|
|
|
|
"github.com/hashicorp/consul/types"
|
2014-01-09 23:49:09 +00:00
|
|
|
)
|
|
|
|
|
2020-11-14 00:26:08 +00:00
|
|
|
// LeaderSummaries describes the summary metrics emitted by the leader
// routines in this file; it is handed to the telemetry subsystem so the
// prometheus sink knows about these metrics up front.
var LeaderSummaries = []prometheus.SummaryDefinition{
	{
		Name: []string{"leader", "barrier"},
		Help: "Measures the time spent waiting for the raft barrier upon gaining leadership.",
	},
	{
		Name: []string{"leader", "reconcileMember"},
		Help: "Measures the time spent updating the raft store for a single serf member's information.",
	},
	{
		Name: []string{"leader", "reapTombstones"},
		Help: "Measures the time spent clearing tombstones.",
	},
}
|
|
|
|
|
2014-01-09 23:49:09 +00:00
|
|
|
const (
	// newLeaderEvent is the name of the user event broadcast on every LAN
	// segment when this server becomes the cluster leader.
	newLeaderEvent = "consul:new-leader"

	// barrierWriteTimeout bounds how long we wait for the raft barrier
	// issued at the top of the leader loop to ensure the FSM is caught up.
	barrierWriteTimeout = 2 * time.Minute
)
|
|
|
|
|
2018-06-21 22:42:28 +00:00
|
|
|
var (
	// caRootPruneInterval is how often we check for stale CARoots to remove.
	caRootPruneInterval = time.Hour

	// minCentralizedConfigVersion is the minimum Consul version in which
	// centralized config is supported.
	minCentralizedConfigVersion = version.Must(version.NewVersion("1.5.0"))
)
|
2017-12-12 00:38:52 +00:00
|
|
|
|
2014-01-09 23:49:09 +00:00
|
|
|
// monitorLeadership is used to monitor if we acquire or lose our role
// as the leader in the Raft cluster. There is some work the leader is
// expected to do, so we must react to changes.
//
// It runs until s.shutdownCh is closed, starting a leaderLoop goroutine on
// each gained-leadership notification and stopping (and waiting for) it on
// each lost-leadership notification.
func (s *Server) monitorLeadership() {
	// We use the notify channel we configured Raft with, NOT Raft's
	// leaderCh, which is only notified best-effort. Doing this ensures
	// that we get all notifications in order, which is required for
	// cleanup and to ensure we never run multiple leader loops.
	raftNotifyCh := s.raftNotifyCh

	// weAreLeaderCh is non-nil exactly while a leaderLoop goroutine is
	// running; closing it signals that goroutine to stop. leaderLoop (the
	// WaitGroup) lets us block until the goroutine has actually exited.
	var weAreLeaderCh chan struct{}
	var leaderLoop sync.WaitGroup
	for {
		select {
		case isLeader := <-raftNotifyCh:
			switch {
			case isLeader:
				// Guard against double-start; should not happen since
				// notifications arrive in order, but log loudly if it does.
				if weAreLeaderCh != nil {
					s.logger.Error("attempted to start the leader loop while running")
					continue
				}

				weAreLeaderCh = make(chan struct{})
				leaderLoop.Add(1)
				go func(ch chan struct{}) {
					defer leaderLoop.Done()
					s.leaderLoop(ch)
				}(weAreLeaderCh)
				s.logger.Info("cluster leadership acquired")

			default:
				// Guard against stopping a loop that was never started.
				if weAreLeaderCh == nil {
					s.logger.Error("attempted to stop the leader loop while not running")
					continue
				}

				s.logger.Debug("shutting down leader loop")
				close(weAreLeaderCh)
				// Block until the leaderLoop goroutine has fully exited so
				// we never run two leader loops concurrently.
				leaderLoop.Wait()
				weAreLeaderCh = nil
				s.logger.Info("cluster leadership lost")
			}
		case <-s.shutdownCh:
			return
		}
	}
}
|
|
|
|
|
2019-06-19 12:50:48 +00:00
|
|
|
func (s *Server) leadershipTransfer() error {
|
|
|
|
retryCount := 3
|
|
|
|
for i := 0; i < retryCount; i++ {
|
|
|
|
future := s.raft.LeadershipTransfer()
|
|
|
|
if err := future.Error(); err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
s.logger.Error("failed to transfer leadership attempt, will retry",
|
|
|
|
"attempt", i,
|
|
|
|
"retry_limit", retryCount,
|
|
|
|
"error", err,
|
|
|
|
)
|
2019-06-19 12:50:48 +00:00
|
|
|
} else {
|
2020-01-28 23:50:41 +00:00
|
|
|
s.logger.Info("successfully transferred leadership",
|
|
|
|
"attempt", i,
|
|
|
|
"retry_limit", retryCount,
|
|
|
|
)
|
2019-06-19 12:50:48 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
return fmt.Errorf("failed to transfer leadership in %d attempts", retryCount)
|
|
|
|
}
|
|
|
|
|
2014-01-09 23:49:09 +00:00
|
|
|
// leaderLoop runs as long as we are the leader to run various
// maintenance activities.
//
// It is structured as a small state machine around two labels:
// RECONCILE (re-arm the reconcile timer, barrier the FSM, establish
// leadership on first pass, and reconcile serf members) and WAIT (the
// event-processing select loop). Closing stopCh or s.shutdownCh exits.
func (s *Server) leaderLoop(stopCh chan struct{}) {
	// Adapt stopCh into a context so downstream leader routines can use
	// standard context cancellation.
	stopCtx := &lib.StopChannelContext{StopCh: stopCh}

	// Fire a user event indicating a new leader
	payload := []byte(s.config.NodeName)
	for name, segment := range s.LANSegments() {
		if err := segment.UserEvent(newLeaderEvent, payload, false); err != nil {
			s.logger.Warn("failed to broadcast new leader event on segment",
				"segment", name,
				"error", err,
			)
		}
	}

	// Reconcile channel is only used once initial reconcile
	// has succeeded
	var reconcileCh chan serf.Member
	establishedLeader := false

RECONCILE:
	// Setup a reconciliation timer
	reconcileCh = nil
	interval := time.After(s.config.ReconcileInterval)

	// Apply a raft barrier to ensure our FSM is caught up
	start := time.Now()
	barrier := s.raft.Barrier(barrierWriteTimeout)
	if err := barrier.Error(); err != nil {
		s.logger.Error("failed to wait for barrier", "error", err)
		goto WAIT
	}
	metrics.MeasureSince([]string{"leader", "barrier"}, start)

	// Check if we need to handle initial leadership actions
	if !establishedLeader {
		if err := s.establishLeadership(stopCtx); err != nil {
			s.logger.Error("failed to establish leadership", "error", err)
			// Immediately revoke leadership since we didn't successfully
			// establish leadership.
			s.revokeLeadership()

			// attempt to transfer leadership. If successful it is
			// time to leave the leaderLoop since this node is no
			// longer the leader. If leadershipTransfer() fails, we
			// will try to acquire it again after
			// 5 seconds.
			if err := s.leadershipTransfer(); err != nil {
				s.logger.Error("failed to transfer leadership", "error", err)
				interval = time.After(5 * time.Second)
				goto WAIT
			}
			return
		}
		establishedLeader = true
		// Deferred so leadership is revoked exactly once, however this
		// function eventually exits.
		defer s.revokeLeadership()
	}

	// Reconcile any missing data
	if err := s.reconcile(); err != nil {
		s.logger.Error("failed to reconcile", "error", err)
		goto WAIT
	}

	// Initial reconcile worked, now we can process the channel
	// updates
	reconcileCh = s.reconcileCh

WAIT:
	// Poll the stop channel to give it priority so we don't waste time
	// trying to perform the other operations if we have been asked to shut
	// down.
	select {
	case <-stopCh:
		return
	default:
	}

	// Periodically reconcile as long as we are the leader,
	// or when Serf events arrive
	for {
		select {
		case <-stopCh:
			return
		case <-s.shutdownCh:
			return
		case <-interval:
			goto RECONCILE
		case member := <-reconcileCh:
			s.reconcileMember(member)
		case index := <-s.tombstoneGC.ExpireCh():
			go s.reapTombstones(index)
		case errCh := <-s.reassertLeaderCh:
			// we can get into this state when the initial
			// establishLeadership has failed as well as the follow
			// up leadershipTransfer. Afterwards we will be waiting
			// for the interval to trigger a reconciliation and can
			// potentially end up here. There is no point to
			// reassert because this agent was never leader in the
			// first place.
			if !establishedLeader {
				errCh <- fmt.Errorf("leadership has not been established")
				continue
			}

			// continue to reassert only if we previously were the
			// leader, which means revokeLeadership followed by an
			// establishLeadership().
			s.revokeLeadership()
			err := s.establishLeadership(stopCtx)
			errCh <- err

			// in case establishLeadership failed, we will try to
			// transfer leadership. At this time raft thinks we are
			// the leader, but consul disagrees.
			if err != nil {
				if err := s.leadershipTransfer(); err != nil {
					// establishedLeader was true before,
					// but it no longer is since it revoked
					// leadership and Leadership transfer
					// also failed. Which is why it stays
					// in the leaderLoop, but now
					// establishedLeader needs to be set to
					// false.
					establishedLeader = false
					interval = time.After(5 * time.Second)
					goto WAIT
				}

				// leadershipTransfer was successful and it is
				// time to leave the leaderLoop.
				return
			}
		}
	}
}
|
|
|
|
|
2014-12-13 05:42:24 +00:00
|
|
|
// establishLeadership is invoked once we become leader and are able
// to invoke an initial barrier. The barrier is used to ensure any
// previously inflight transactions have been committed and that our
// state is up-to-date.
//
// The steps below are order-sensitive; on any error the caller is expected
// to call revokeLeadership to undo whatever was started.
func (s *Server) establishLeadership(ctx context.Context) error {
	start := time.Now()
	if err := s.initializeACLs(ctx); err != nil {
		return err
	}

	// Hint the tombstone expiration timer. When we freshly establish leadership
	// we become the authoritative timer, and so we need to start the clock
	// on any pending GC events.
	s.tombstoneGC.SetEnabled(true)
	lastIndex := s.raft.LastIndex()
	s.tombstoneGC.Hint(lastIndex)

	// Setup the session timers. This is done both when starting up or when
	// a leader fail over happens. Since the timers are maintained by the leader
	// node along, effectively this means all the timers are renewed at the
	// time of failover. The TTL contract is that the session will not be expired
	// before the TTL, so expiring it later is allowable.
	//
	// This MUST be done after the initial barrier to ensure the latest Sessions
	// are available to be initialized. Otherwise initialization may use stale
	// data.
	if err := s.initializeSessionTimers(); err != nil {
		return err
	}

	// Enterprise-only leader setup (no-op in OSS builds).
	if err := s.establishEnterpriseLeadership(ctx); err != nil {
		return err
	}

	s.getOrCreateAutopilotConfig()
	s.autopilot.Start(ctx)

	// Kick off the background replication/anti-entropy leader routines.
	s.startConfigReplication(ctx)

	s.startFederationStateReplication(ctx)

	s.startFederationStateAntiEntropy(ctx)

	if err := s.startConnectLeader(ctx); err != nil {
		return err
	}

	// Attempt to bootstrap config entries. We wait until after starting the
	// Connect leader tasks so we hopefully have transitioned to supporting
	// service-intentions.
	if err := s.bootstrapConfigEntries(s.config.ConfigEntryBootstrap); err != nil {
		return err
	}

	// Only advertise consistent-read readiness once everything above
	// succeeded.
	s.setConsistentReadReady()

	s.logger.Debug("successfully established leadership", "duration", time.Since(start))
	return nil
}
|
|
|
|
|
2015-01-05 22:58:59 +00:00
|
|
|
// revokeLeadership is invoked once we step down as leader.
// This is used to cleanup any state that may be specific to a leader.
//
// It is the inverse of establishLeadership and tears down the leader-only
// routines started there; the calls below are kept in this order
// deliberately (roughly reverse of startup).
func (s *Server) revokeLeadership() {
	// Disable the tombstone GC, since it is only useful as a leader
	s.tombstoneGC.SetEnabled(false)

	// Clear the session timers on either shutdown or step down, since we
	// are no longer responsible for session expirations.
	s.clearAllSessionTimers()

	// Enterprise-only leader teardown (no-op in OSS builds).
	s.revokeEnterpriseLeadership()

	s.stopFederationStateAntiEntropy()

	s.stopFederationStateReplication()

	s.stopConfigReplication()

	s.stopConnectLeader()

	s.stopACLTokenReaping()

	s.stopACLUpgrade()

	// Stop advertising consistent reads while we are not the leader.
	s.resetConsistentReadReady()

	// Stop returns a chan and we want to block until it is closed
	// which indicates that autopilot is actually stopped.
	<-s.autopilot.Stop()
}
|
|
|
|
|
2018-10-19 16:04:07 +00:00
|
|
|
// initializeACLs is used to setup the ACLs if we are the leader
// and need to do this.
//
// In the primary (ACL) datacenter it ensures the builtin global-management
// policy, the configured master token, and the anonymous token exist, then
// starts the legacy-token upgrade routine. In secondary datacenters it
// starts ACL replication instead. In both cases token reaping is started.
func (s *Server) initializeACLs(ctx context.Context) error {
	if !s.config.ACLsEnabled {
		return nil
	}

	// Purge the cache, since it could've changed while we were not the
	// leader.
	s.acls.cache.Purge()

	// Purge the auth method validators since they could've changed while we
	// were not leader.
	s.aclAuthMethodValidators.Purge()

	// Remove any token affected by CVE-2019-8336
	if !s.InACLDatacenter() {
		_, token, err := s.fsm.State().ACLTokenGetBySecret(nil, redactedToken, nil)
		if err == nil && token != nil {
			req := structs.ACLTokenBatchDeleteRequest{
				TokenIDs: []string{token.AccessorID},
			}

			_, err := s.raftApply(structs.ACLTokenDeleteRequestType, &req)
			if err != nil {
				return fmt.Errorf("failed to remove token with a redacted secret: %v", err)
			}
		}
	}

	if s.InACLDatacenter() {
		s.logger.Info("initializing acls")

		// TODO(partitions): initialize acls in all of the partitions?

		// Create/Upgrade the builtin global-management policy
		_, policy, err := s.fsm.State().ACLPolicyGetByID(nil, structs.ACLPolicyGlobalManagementID, structs.DefaultEnterpriseMetaInDefaultPartition())
		if err != nil {
			return fmt.Errorf("failed to get the builtin global-management policy")
		}
		// (Re)write the policy if it is missing or its rules drifted from
		// the builtin definition; user-customized name/description are kept.
		if policy == nil || policy.Rules != structs.ACLPolicyGlobalManagement {
			newPolicy := structs.ACLPolicy{
				ID:             structs.ACLPolicyGlobalManagementID,
				Name:           "global-management",
				Description:    "Builtin Policy that grants unlimited access",
				Rules:          structs.ACLPolicyGlobalManagement,
				Syntax:         acl.SyntaxCurrent,
				EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
			}
			if policy != nil {
				newPolicy.Name = policy.Name
				newPolicy.Description = policy.Description
			}

			newPolicy.SetHash(true)

			req := structs.ACLPolicyBatchSetRequest{
				Policies: structs.ACLPolicies{&newPolicy},
			}
			_, err := s.raftApply(structs.ACLPolicySetRequestType, &req)
			if err != nil {
				return fmt.Errorf("failed to create global-management policy: %v", err)
			}
			s.logger.Info("Created ACL 'global-management' policy")
		}

		// Check for configured master token.
		if master := s.config.ACLMasterToken; len(master) > 0 {
			state := s.fsm.State()
			if _, err := uuid.ParseUUID(master); err != nil {
				s.logger.Warn("Configuring a non-UUID master token is deprecated")
			}

			_, token, err := state.ACLTokenGetBySecret(nil, master, nil)
			if err != nil {
				return fmt.Errorf("failed to get master token: %v", err)
			}
			// Ignoring expiration times to avoid an insertion collision.
			if token == nil {
				accessor, err := lib.GenerateUUID(s.checkTokenUUID)
				if err != nil {
					return fmt.Errorf("failed to generate the accessor ID for the master token: %v", err)
				}

				token := structs.ACLToken{
					AccessorID:  accessor,
					SecretID:    master,
					Description: "Master Token",
					Policies: []structs.ACLTokenPolicyLink{
						{
							ID: structs.ACLPolicyGlobalManagementID,
						},
					},
					CreateTime: time.Now(),
					Local:      false,

					// DEPRECATED (ACL-Legacy-Compat) - only needed for compatibility
					Type:           structs.ACLTokenTypeManagement,
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				}

				token.SetHash(true)

				// Prefer inserting via the bootstrap path when bootstrap is
				// still possible; fall back to a plain token set otherwise.
				done := false
				if canBootstrap, _, err := state.CanBootstrapACLToken(); err == nil && canBootstrap {
					req := structs.ACLTokenBootstrapRequest{
						Token:      token,
						ResetIndex: 0,
					}
					if _, err := s.raftApply(structs.ACLBootstrapRequestType, &req); err == nil {
						s.logger.Info("Bootstrapped ACL master token from configuration")
						done = true
					} else {
						// Only these two bootstrap failures are benign (another
						// path will create the token below); anything else is fatal.
						if err.Error() != structs.ACLBootstrapNotAllowedErr.Error() &&
							err.Error() != structs.ACLBootstrapInvalidResetIndexErr.Error() {
							return fmt.Errorf("failed to bootstrap master token: %v", err)
						}
					}
				}

				if !done {
					// either we didn't attempt to or setting the token with a bootstrap request failed.
					req := structs.ACLTokenBatchSetRequest{
						Tokens: structs.ACLTokens{&token},
						CAS:    false,
					}
					if _, err := s.raftApply(structs.ACLTokenSetRequestType, &req); err != nil {
						return fmt.Errorf("failed to create master token: %v", err)
					}

					s.logger.Info("Created ACL master token from configuration")
				}
			}
		}

		state := s.fsm.State()
		_, token, err := state.ACLTokenGetBySecret(nil, structs.ACLTokenAnonymousID, nil)
		if err != nil {
			return fmt.Errorf("failed to get anonymous token: %v", err)
		}
		// Ignoring expiration times to avoid an insertion collision.
		if token == nil {
			// DEPRECATED (ACL-Legacy-Compat) - Don't need to query for previous "anonymous" token
			// check for legacy token that needs an upgrade
			_, legacyToken, err := state.ACLTokenGetBySecret(nil, anonymousToken, nil)
			if err != nil {
				return fmt.Errorf("failed to get anonymous token: %v", err)
			}
			// Ignoring expiration times to avoid an insertion collision.

			// the token upgrade routine will take care of upgrading the token if a legacy version exists
			if legacyToken == nil {
				token = &structs.ACLToken{
					AccessorID:     structs.ACLTokenAnonymousID,
					SecretID:       anonymousToken,
					Description:    "Anonymous Token",
					CreateTime:     time.Now(),
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				}
				token.SetHash(true)

				req := structs.ACLTokenBatchSetRequest{
					Tokens: structs.ACLTokens{token},
					CAS:    false,
				}
				_, err := s.raftApply(structs.ACLTokenSetRequestType, &req)
				if err != nil {
					return fmt.Errorf("failed to create anonymous token: %v", err)
				}
				s.logger.Info("Created ACL anonymous token from configuration")
			}
		}
		// launch the upgrade go routine to generate accessors for everything
		s.startACLUpgrade(ctx)
	} else {
		s.startACLReplication(ctx)
	}

	s.startACLTokenReaping(ctx)

	return nil
}
|
|
|
|
|
2021-09-29 20:14:36 +00:00
|
|
|
// legacyACLTokenUpgrade runs a single time to upgrade any tokens that may
// have been created immediately before the Consul upgrade, or any legacy tokens
// from a restored snapshot.
//
// It loops at a fixed rate, assigning accessor IDs (and, for legacy
// management tokens, the global-management policy) in CAS batches until no
// upgradeable tokens remain, then stops its own leader routine and exits.
// TODO(ACL-Legacy-Compat): remove in phase 2
func (s *Server) legacyACLTokenUpgrade(ctx context.Context) error {
	// aclUpgradeRateLimit is the number of batch upgrade requests per second allowed.
	const aclUpgradeRateLimit rate.Limit = 1.0

	// aclUpgradeBatchSize controls how many tokens we look at during each round of upgrading. Individual raft logs
	// will be further capped using the aclBatchUpsertSize. This limit just prevents us from creating a single slice
	// with all tokens in it.
	const aclUpgradeBatchSize = 128

	limiter := rate.NewLimiter(aclUpgradeRateLimit, int(aclUpgradeRateLimit))
	for {
		// Wait returns an error when ctx is canceled, which is how this
		// routine is shut down externally.
		if err := limiter.Wait(ctx); err != nil {
			return err
		}

		// actually run the upgrade here
		state := s.fsm.State()
		tokens, _, err := state.ACLTokenListUpgradeable(aclUpgradeBatchSize)
		if err != nil {
			s.logger.Warn("encountered an error while searching for tokens without accessor ids", "error", err)
		}
		// No need to check expiration time here, as that only exists for v2 tokens.

		if len(tokens) == 0 {
			// No new legacy tokens can be created, so we can exit
			s.stopACLUpgrade() // required to prevent goroutine leak, according to TestAgentLeaks_Server
			return nil
		}

		var newTokens structs.ACLTokens
		for _, token := range tokens {
			// This should be entirely unnecessary but is just a small safeguard against changing accessor IDs
			if token.AccessorID != "" {
				continue
			}

			newToken := *token
			if token.SecretID == anonymousToken {
				newToken.AccessorID = structs.ACLTokenAnonymousID
			} else {
				accessor, err := lib.GenerateUUID(s.checkTokenUUID)
				if err != nil {
					// Skip this token; it will be retried on a later pass.
					s.logger.Warn("failed to generate accessor during token auto-upgrade", "error", err)
					continue
				}
				newToken.AccessorID = accessor
			}

			// Assign the global-management policy to legacy management tokens
			if len(newToken.Policies) == 0 &&
				len(newToken.ServiceIdentities) == 0 &&
				len(newToken.NodeIdentities) == 0 &&
				len(newToken.Roles) == 0 &&
				newToken.Type == structs.ACLTokenTypeManagement {
				newToken.Policies = append(newToken.Policies, structs.ACLTokenPolicyLink{ID: structs.ACLPolicyGlobalManagementID})
			}

			// need to copy these as we are going to do a CAS operation.
			newToken.CreateIndex = token.CreateIndex
			newToken.ModifyIndex = token.ModifyIndex

			newToken.SetHash(true)

			newTokens = append(newTokens, &newToken)
		}

		req := &structs.ACLTokenBatchSetRequest{Tokens: newTokens, CAS: true}

		// A failed batch is only logged; the next loop iteration will pick
		// up any tokens that were not upgraded.
		_, err = s.raftApply(structs.ACLTokenSetRequestType, req)
		if err != nil {
			s.logger.Error("failed to apply acl token upgrade batch", "error", err)
		}
	}
}
|
|
|
|
|
2021-09-29 20:14:36 +00:00
|
|
|
// TODO(ACL-Legacy-Compat): remove in phase 2. Keeping it for now so that we
|
|
|
|
// can upgrade any tokens created immediately before the upgrade happens.
|
2021-05-20 14:07:23 +00:00
|
|
|
func (s *Server) startACLUpgrade(ctx context.Context) {
|
2019-10-04 17:08:45 +00:00
|
|
|
if s.config.PrimaryDatacenter != s.config.Datacenter {
|
|
|
|
// token upgrades should only run in the primary
|
2018-10-19 16:04:07 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-05-20 14:07:23 +00:00
|
|
|
s.leaderRoutineManager.Start(ctx, aclUpgradeRoutineName, s.legacyACLTokenUpgrade)
|
2018-10-19 16:04:07 +00:00
|
|
|
}
|
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
// stopACLUpgrade stops the legacy ACL token upgrade leader routine, if it
// is running.
func (s *Server) stopACLUpgrade() {
	s.leaderRoutineManager.Stop(aclUpgradeRoutineName)
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2021-05-20 14:07:23 +00:00
|
|
|
func (s *Server) startACLReplication(ctx context.Context) {
|
2019-10-04 17:08:45 +00:00
|
|
|
if s.InACLDatacenter() {
|
|
|
|
return
|
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
// unlike some other leader routines this initializes some extra state
|
|
|
|
// and therefore we want to prevent re-initialization if things are already
|
|
|
|
// running
|
|
|
|
if s.leaderRoutineManager.IsRunning(aclPolicyReplicationRoutineName) {
|
2018-10-19 16:04:07 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
s.initReplicationStatus()
|
2021-05-20 14:07:23 +00:00
|
|
|
s.leaderRoutineManager.Start(ctx, aclPolicyReplicationRoutineName, s.runACLPolicyReplicator)
|
|
|
|
s.leaderRoutineManager.Start(ctx, aclRoleReplicationRoutineName, s.runACLRoleReplicator)
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2019-04-15 20:43:19 +00:00
|
|
|
if s.config.ACLTokenReplication {
|
2021-05-20 14:07:23 +00:00
|
|
|
s.leaderRoutineManager.Start(ctx, aclTokenReplicationRoutineName, s.runACLTokenReplicator)
|
2019-04-15 20:43:19 +00:00
|
|
|
s.updateACLReplicationStatusRunning(structs.ACLReplicateTokens)
|
|
|
|
} else {
|
|
|
|
s.updateACLReplicationStatusRunning(structs.ACLReplicatePolicies)
|
|
|
|
}
|
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
// replicateFunc is the signature shared by the per-resource ACL replication
// implementations (policies, roles, tokens). It receives the last remote
// index previously seen and returns the new remote index, whether the
// caller should exit, and any error encountered during the round.
type replicateFunc func(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error)
|
2019-04-15 20:43:19 +00:00
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
// This function is only intended to be run as a managed go routine, it will block until
|
|
|
|
// the context passed in indicates that it should exit.
|
|
|
|
func (s *Server) runACLPolicyReplicator(ctx context.Context) error {
|
2020-01-28 23:50:41 +00:00
|
|
|
policyLogger := s.aclReplicationLogger(structs.ACLReplicatePolicies.SingularNoun())
|
|
|
|
policyLogger.Info("started ACL Policy replication")
|
2021-04-22 15:20:53 +00:00
|
|
|
return s.runACLReplicator(ctx, policyLogger, structs.ACLReplicatePolicies, s.replicateACLPolicies, "acl-policies")
|
2019-10-04 17:08:45 +00:00
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
// This function is only intended to be run as a managed go routine, it will block until
|
|
|
|
// the context passed in indicates that it should exit.
|
|
|
|
func (s *Server) runACLRoleReplicator(ctx context.Context) error {
|
2020-01-28 23:50:41 +00:00
|
|
|
roleLogger := s.aclReplicationLogger(structs.ACLReplicateRoles.SingularNoun())
|
|
|
|
roleLogger.Info("started ACL Role replication")
|
2021-04-22 15:20:53 +00:00
|
|
|
return s.runACLReplicator(ctx, roleLogger, structs.ACLReplicateRoles, s.replicateACLRoles, "acl-roles")
|
2019-10-04 17:08:45 +00:00
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
// This function is only intended to be run as a managed go routine, it will block until
|
|
|
|
// the context passed in indicates that it should exit.
|
|
|
|
func (s *Server) runACLTokenReplicator(ctx context.Context) error {
|
2020-01-28 23:50:41 +00:00
|
|
|
tokenLogger := s.aclReplicationLogger(structs.ACLReplicateTokens.SingularNoun())
|
|
|
|
tokenLogger.Info("started ACL Token replication")
|
2021-04-22 15:20:53 +00:00
|
|
|
return s.runACLReplicator(ctx, tokenLogger, structs.ACLReplicateTokens, s.replicateACLTokens, "acl-tokens")
|
2019-10-04 17:08:45 +00:00
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
// This function is only intended to be run as a managed go routine, it will block until
// the context passed in indicates that it should exit. It repeatedly invokes
// replicateFunc, pacing rounds with a rate limiter, emitting status/index
// gauges under the given metricName, and applying capped exponential backoff
// after failures.
func (s *Server) runACLReplicator(
	ctx context.Context,
	logger hclog.Logger,
	replicationType structs.ACLReplicationType,
	replicateFunc replicateFunc,
	metricName string,
) error {
	// failedAttempts drives the (1 << failedAttempts) second backoff below;
	// it is only incremented while the backoff stays under
	// aclReplicationMaxRetryBackoff and is reset on success.
	var failedAttempts uint
	limiter := rate.NewLimiter(rate.Limit(s.config.ACLReplicationRate), s.config.ACLReplicationBurst)

	var lastRemoteIndex uint64
	for {
		// Wait blocks to enforce the configured replication rate; it returns
		// an error when ctx is cancelled, which is our exit signal.
		if err := limiter.Wait(ctx); err != nil {
			return err
		}

		// Without a replication token there is nothing we can do; the limiter
		// above keeps this polling loop from spinning.
		if s.tokens.ReplicationToken() == "" {
			continue
		}

		index, exit, err := replicateFunc(ctx, logger, lastRemoteIndex)
		if exit {
			return nil
		}

		if err != nil {
			metrics.SetGauge([]string{"leader", "replication", metricName, "status"},
				0,
			)
			// Reset so the next successful round performs a full resync
			// instead of resuming from a possibly inconsistent position.
			lastRemoteIndex = 0
			s.updateACLReplicationStatusError(err.Error())
			logger.Warn("ACL replication error (will retry if still leader)",
				"error", err,
			)
			if (1 << failedAttempts) < aclReplicationMaxRetryBackoff {
				failedAttempts++
			}

			select {
			case <-ctx.Done():
				return nil
			case <-time.After((1 << failedAttempts) * time.Second):
				// do nothing
			}
		} else {
			metrics.SetGauge([]string{"leader", "replication", metricName, "status"},
				1,
			)
			metrics.SetGauge([]string{"leader", "replication", metricName, "index"},
				float32(index),
			)
			lastRemoteIndex = index
			s.updateACLReplicationStatusIndex(replicationType, index)
			logger.Debug("ACL replication completed through remote index",
				"index", index,
			)
			failedAttempts = 0
		}
	}
}
|
2018-10-19 16:04:07 +00:00
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
func (s *Server) aclReplicationLogger(singularNoun string) hclog.Logger {
|
|
|
|
return s.loggers.
|
|
|
|
Named(logging.Replication).
|
|
|
|
Named(logging.ACL).
|
|
|
|
Named(singularNoun)
|
|
|
|
}
|
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
func (s *Server) stopACLReplication() {
|
|
|
|
// these will be no-ops when not started
|
|
|
|
s.leaderRoutineManager.Stop(aclPolicyReplicationRoutineName)
|
|
|
|
s.leaderRoutineManager.Stop(aclRoleReplicationRoutineName)
|
|
|
|
s.leaderRoutineManager.Stop(aclTokenReplicationRoutineName)
|
2018-10-19 16:04:07 +00:00
|
|
|
}
|
|
|
|
|
2021-05-20 14:07:23 +00:00
|
|
|
func (s *Server) startConfigReplication(ctx context.Context) {
|
2019-04-26 17:38:39 +00:00
|
|
|
if s.config.PrimaryDatacenter == "" || s.config.PrimaryDatacenter == s.config.Datacenter {
|
|
|
|
// replication shouldn't run in the primary DC
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-05-20 14:07:23 +00:00
|
|
|
s.leaderRoutineManager.Start(ctx, configReplicationRoutineName, s.configReplicator.Run)
|
2019-04-26 17:38:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) stopConfigReplication() {
|
|
|
|
// will be a no-op when not started
|
2019-10-04 17:08:45 +00:00
|
|
|
s.leaderRoutineManager.Stop(configReplicationRoutineName)
|
2019-04-26 17:38:39 +00:00
|
|
|
}
|
|
|
|
|
2021-05-20 14:07:23 +00:00
|
|
|
func (s *Server) startFederationStateReplication(ctx context.Context) {
|
2020-03-09 20:59:02 +00:00
|
|
|
if s.config.PrimaryDatacenter == "" || s.config.PrimaryDatacenter == s.config.Datacenter {
|
|
|
|
// replication shouldn't run in the primary DC
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-01-22 16:03:24 +00:00
|
|
|
if s.gatewayLocator != nil {
|
|
|
|
s.gatewayLocator.SetUseReplicationSignal(true)
|
|
|
|
s.gatewayLocator.SetLastFederationStateReplicationError(nil, false)
|
|
|
|
}
|
|
|
|
|
2021-05-20 14:07:23 +00:00
|
|
|
s.leaderRoutineManager.Start(ctx, federationStateReplicationRoutineName, s.federationStateReplicator.Run)
|
2020-03-09 20:59:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) stopFederationStateReplication() {
|
|
|
|
// will be a no-op when not started
|
|
|
|
s.leaderRoutineManager.Stop(federationStateReplicationRoutineName)
|
2021-01-22 16:03:24 +00:00
|
|
|
|
|
|
|
if s.gatewayLocator != nil {
|
|
|
|
s.gatewayLocator.SetUseReplicationSignal(false)
|
|
|
|
s.gatewayLocator.SetLastFederationStateReplicationError(nil, false)
|
|
|
|
}
|
2020-03-09 20:59:02 +00:00
|
|
|
}
|
|
|
|
|
2017-04-13 01:38:36 +00:00
|
|
|
// getOrCreateAutopilotConfig is used to get the autopilot config, initializing it if necessary.
// When no config exists in the state store yet, the static config from the
// agent configuration is written through Raft. Returns nil if the config
// could not be read or initialized (the error is logged, not returned).
func (s *Server) getOrCreateAutopilotConfig() *structs.AutopilotConfig {
	logger := s.loggers.Named(logging.Autopilot)
	state := s.fsm.State()
	_, config, err := state.AutopilotConfig()
	if err != nil {
		logger.Error("failed to get config", "error", err)
		return nil
	}
	if config != nil {
		return config
	}

	// Nothing stored yet: seed the state store with the agent's static
	// autopilot configuration via a Raft apply so all servers agree on it.
	config = s.config.AutopilotConfig
	req := structs.AutopilotSetConfigRequest{Config: *config}
	if _, err = s.raftApply(structs.AutopilotRequestType, req); err != nil {
		logger.Error("failed to initialize config", "error", err)
		return nil
	}

	return config
}
|
|
|
|
|
2019-04-26 18:25:03 +00:00
|
|
|
// bootstrapConfigEntries ensures the given config entries exist in the
// state store, creating any that are missing via CAS Raft applies. It only
// runs in the primary datacenter and only once all servers in the DC meet
// the minimum centralized-config version. Existing entries are never
// overwritten.
func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error {
	if s.config.PrimaryDatacenter != "" && s.config.PrimaryDatacenter != s.config.Datacenter {
		// only bootstrap in the primary datacenter
		return nil
	}

	if len(entries) < 1 {
		// nothing to initialize
		return nil
	}

	if ok, _ := ServersInDCMeetMinimumVersion(s, s.config.Datacenter, minCentralizedConfigVersion); !ok {
		s.loggers.
			Named(logging.CentralConfig).
			Warn("config: can't initialize until all servers >=" + minCentralizedConfigVersion.String())
		return nil
	}

	state := s.fsm.State()

	// Do some quick preflight checks to see if someone is doing something
	// that's not allowed at this time:
	//
	// - Trying to upgrade from an older pre-1.9.0 version of consul with
	// intentions AND are trying to bootstrap a service-intentions config entry
	// at the same time.
	//
	// - Trying to insert service-intentions config entries when connect is
	// disabled.

	usingConfigEntries, err := s.fsm.State().AreIntentionsInConfigEntries()
	if err != nil {
		return fmt.Errorf("Failed to determine if we are migrating intentions yet: %v", err)
	}

	if !usingConfigEntries || !s.config.ConnectEnabled {
		for _, entry := range entries {
			if entry.GetKind() == structs.ServiceIntentions {
				if !s.config.ConnectEnabled {
					return fmt.Errorf("Refusing to apply configuration entry %q / %q because Connect must be enabled to bootstrap intentions",
						entry.GetKind(), entry.GetName())
				}
				if !usingConfigEntries {
					return fmt.Errorf("Refusing to apply configuration entry %q / %q because intentions are still being migrated to config entries",
						entry.GetKind(), entry.GetName())
				}
			}
		}
	}

	for _, entry := range entries {
		// avoid a round trip through Raft if we know the CAS is going to fail
		_, existing, err := state.ConfigEntry(nil, entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
		if err != nil {
			return fmt.Errorf("Failed to determine whether the configuration for %q / %q already exists: %v", entry.GetKind(), entry.GetName(), err)
		}

		if existing == nil {
			// ensure the ModifyIndex is set to 0 for the CAS request
			entry.GetRaftIndex().ModifyIndex = 0

			req := structs.ConfigEntryRequest{
				Op:         structs.ConfigEntryUpsertCAS,
				Datacenter: s.config.Datacenter,
				Entry:      entry,
			}

			_, err := s.raftApply(structs.ConfigEntryRequestType, &req)
			if err != nil {
				return fmt.Errorf("Failed to apply configuration entry %q / %q: %v", entry.GetKind(), entry.GetName(), err)
			}
		}
	}
	return nil
}
|
|
|
|
|
2014-04-03 22:51:03 +00:00
|
|
|
// reconcileReaped is used to reconcile nodes that have failed and been reaped
// from Serf but remain in the catalog. This is done by looking for unknown nodes with serfHealth checks registered.
// We generate a "reap" event to cause the node to be cleaned up.
//
// known holds the names of members currently visible to Serf; any catalog
// node with a serfHealth check not in that set is considered stale.
func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *structs.EnterpriseMeta) error {
	if nodeEntMeta == nil {
		nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
	}

	state := s.fsm.State()
	_, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta)
	if err != nil {
		return err
	}
	for _, check := range checks {
		// Ignore any non serf checks
		if check.CheckID != structs.SerfCheckID {
			continue
		}

		// Check if this node is "known" by serf
		if _, ok := known[check.Node]; ok {
			continue
		}

		// Get the node services, look for ConsulServiceID
		_, services, err := state.NodeServices(nil, check.Node, nodeEntMeta)
		if err != nil {
			return err
		}
		serverPort := 0
		serverAddr := ""
		serverID := ""

	CHECKS:
		for _, service := range services.Services {
			if service.ID == structs.ConsulServiceID {
				_, node, err := state.GetNode(check.Node, nodeEntMeta)
				if err != nil {
					s.logger.Error("Unable to look up node with name", "name", check.Node, "error", err)
					continue CHECKS
				}

				serverAddr = node.Address
				serverPort = service.Port
				// Resolve the raft server ID via the address-keyed lookup.
				lookupAddr := net.JoinHostPort(serverAddr, strconv.Itoa(serverPort))
				svr := s.serverLookup.Server(raft.ServerAddress(lookupAddr))
				if svr != nil {
					serverID = svr.ID
				}
				break
			}
		}

		// Create a fake member so the normal reap handler can process this
		// catalog-only node as if Serf had reported it reaped.
		member := serf.Member{
			Name: check.Node,
			Tags: map[string]string{
				"dc":   s.config.Datacenter,
				"role": "node",
			},
		}
		addEnterpriseSerfTags(member.Tags, nodeEntMeta)

		// Create the appropriate tags if this was a server node
		if serverPort > 0 {
			member.Tags["role"] = "consul"
			member.Tags["port"] = strconv.FormatUint(uint64(serverPort), 10)
			member.Tags["id"] = serverID
			member.Addr = net.ParseIP(serverAddr)
		}

		// Attempt to reap this member
		if err := s.handleReapMember(member, nodeEntMeta); err != nil {
			return err
		}
	}
	return nil
}
|
|
|
|
|
|
|
|
// reconcileMember is used to do an async reconcile of a single
// serf member, dispatching on the member's Serf status to the appropriate
// alive/failed/left/reap handler. Permission-denied errors are logged but
// deliberately not returned to the caller.
func (s *Server) reconcileMember(member serf.Member) error {
	// Check if this is a member we should handle
	if !s.shouldHandleMember(member) {
		// TODO(partition): log the partition name
		s.logger.Warn("skipping reconcile of node", "member", member)
		return nil
	}
	defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())

	nodeEntMeta := getSerfMemberEnterpriseMeta(member)

	var err error
	switch member.Status {
	case serf.StatusAlive:
		err = s.handleAliveMember(member, nodeEntMeta)
	case serf.StatusFailed:
		err = s.handleFailedMember(member, nodeEntMeta)
	case serf.StatusLeft:
		err = s.handleLeftMember(member, nodeEntMeta)
	case StatusReap:
		err = s.handleReapMember(member, nodeEntMeta)
	}
	if err != nil {
		s.logger.Error("failed to reconcile member",
			// TODO(partition): log the partition name
			"member", member,
			"error", err,
		)

		// Permission denied should not bubble up
		if acl.IsErrPermissionDenied(err) {
			return nil
		}
	}
	// NOTE: errors other than permission-denied are logged above but still
	// swallowed here; reconciliation is retried on the next cycle.
	return nil
}
|
|
|
|
|
|
|
|
// shouldHandleMember checks if this is a Consul pool member
|
|
|
|
func (s *Server) shouldHandleMember(member serf.Member) bool {
|
|
|
|
if valid, dc := isConsulNode(member); valid && dc == s.config.Datacenter {
|
|
|
|
return true
|
|
|
|
}
|
2017-08-14 14:36:07 +00:00
|
|
|
if valid, parts := metadata.IsConsulServer(member); valid &&
|
|
|
|
parts.Segment == "" &&
|
|
|
|
parts.Datacenter == s.config.Datacenter {
|
2014-01-09 23:49:09 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleAliveMember is used to ensure the node
// is registered, with a passing health check. For server members it also
// registers the consul service and attempts to add the server as a Raft
// peer. If the catalog already matches (same address, same service meta,
// passing serfHealth check) the function returns without a Raft apply.
func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
	if nodeEntMeta == nil {
		nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
	}

	// Register consul service if a server
	var service *structs.NodeService
	if valid, parts := metadata.IsConsulServer(member); valid {
		service = &structs.NodeService{
			ID:      structs.ConsulServiceID,
			Service: structs.ConsulServiceName,
			Port:    parts.Port,
			Weights: &structs.Weights{
				Passing: 1,
				Warning: 1,
			},
			EnterpriseMeta: *nodeEntMeta,
			Meta: map[string]string{
				// DEPRECATED - remove nonvoter in favor of read_replica in a future version of consul
				"non_voter":             strconv.FormatBool(member.Tags["nonvoter"] == "1"),
				"read_replica":          strconv.FormatBool(member.Tags["read_replica"] == "1"),
				"raft_version":          strconv.Itoa(parts.RaftVersion),
				"serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10),
				"serf_protocol_min":     strconv.FormatUint(uint64(member.ProtocolMin), 10),
				"serf_protocol_max":     strconv.FormatUint(uint64(member.ProtocolMax), 10),
				"version":               parts.Build.String(),
			},
		}

		// Attempt to join the consul server
		if err := s.joinConsulServer(member, parts); err != nil {
			return err
		}
	}

	// Check if the node exists
	state := s.fsm.State()
	_, node, err := state.GetNode(member.Name, nodeEntMeta)
	if err != nil {
		return err
	}
	if node != nil && node.Address == member.Addr.String() {
		// Check if the associated service is available
		if service != nil {
			match := false
			_, services, err := state.NodeServices(nil, member.Name, nodeEntMeta)
			if err != nil {
				return err
			}
			if services != nil {
				for id, serv := range services.Services {
					if id == service.ID {
						// If metadata are different, be sure to update it
						match = reflect.DeepEqual(serv.Meta, service.Meta)
					}
				}
			}
			// A meta mismatch (or missing service) forces re-registration
			// below even if the health check is already passing.
			if !match {
				goto AFTER_CHECK
			}
		}

		// Check if the serfCheck is in the passing state
		_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta)
		if err != nil {
			return err
		}
		for _, check := range checks {
			if check.CheckID == structs.SerfCheckID && check.Status == api.HealthPassing {
				return nil
			}
		}
	}
AFTER_CHECK:
	s.logger.Info("member joined, marking health alive", "member", member.Name)

	// Register with the catalog.
	req := structs.RegisterRequest{
		Datacenter: s.config.Datacenter,
		Node:       member.Name,
		ID:         types.NodeID(member.Tags["id"]),
		Address:    member.Addr.String(),
		Service:    service,
		Check: &structs.HealthCheck{
			Node:    member.Name,
			CheckID: structs.SerfCheckID,
			Name:    structs.SerfCheckName,
			Status:  api.HealthPassing,
			Output:  structs.SerfCheckAliveOutput,
		},
		EnterpriseMeta: *nodeEntMeta,
	}
	// Preserve existing tagged addresses and node meta rather than
	// clobbering them with an empty registration.
	if node != nil {
		req.TaggedAddresses = node.TaggedAddresses
		req.NodeMeta = node.Meta
	}

	_, err = s.raftApply(structs.RegisterRequestType, &req)
	return err
}
|
|
|
|
|
|
|
|
// handleFailedMember is used to mark the node's status
// as being critical, along with all checks as unknown. If the node is not
// in the catalog, or its serfHealth check is already critical, nothing is
// written through Raft.
func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
	if nodeEntMeta == nil {
		nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
	}

	// Check if the node exists
	state := s.fsm.State()
	_, node, err := state.GetNode(member.Name, nodeEntMeta)
	if err != nil {
		return err
	}

	if node == nil {
		s.logger.Info("ignoring failed event for member because it does not exist in the catalog", "member", member.Name)
		return nil
	}

	// TODO(partitions): get the ent meta by parsing serf tags

	if node.Address == member.Addr.String() {
		// Check if the serfCheck is in the critical state
		_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta)
		if err != nil {
			return err
		}
		for _, check := range checks {
			if check.CheckID == structs.SerfCheckID && check.Status == api.HealthCritical {
				return nil
			}
		}
	}
	s.logger.Info("member failed, marking health critical", "member", member.Name)

	// Register with the catalog
	req := structs.RegisterRequest{
		Datacenter:     s.config.Datacenter,
		Node:           member.Name,
		EnterpriseMeta: *nodeEntMeta,
		ID:             types.NodeID(member.Tags["id"]),
		Address:        member.Addr.String(),
		Check: &structs.HealthCheck{
			Node:    member.Name,
			CheckID: structs.SerfCheckID,
			Name:    structs.SerfCheckName,
			Status:  api.HealthCritical,
			Output:  structs.SerfCheckFailedOutput,
		},

		// If there's existing information about the node, do not
		// clobber it.
		SkipNodeUpdate: true,
	}
	_, err = s.raftApply(structs.RegisterRequestType, &req)
	return err
}
|
|
|
|
|
|
|
|
// handleLeftMember is used to handle members that gracefully
|
|
|
|
// left. They are deregistered if necessary.
|
2021-08-19 20:09:42 +00:00
|
|
|
func (s *Server) handleLeftMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
|
|
|
|
return s.handleDeregisterMember("left", member, nodeEntMeta)
|
2014-03-20 19:51:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// handleReapMember is used to handle members that have been
|
|
|
|
// reaped after a prolonged failure. They are deregistered.
|
2021-08-19 20:09:42 +00:00
|
|
|
func (s *Server) handleReapMember(member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
|
|
|
|
return s.handleDeregisterMember("reaped", member, nodeEntMeta)
|
2014-03-20 19:51:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// handleDeregisterMember is used to deregister a member of a given reason
// (e.g. "left" or "reaped"). Server members are first removed from the Raft
// peer set; the catalog deregistration is then applied through Raft unless
// the node is already absent. The local node is never deregistered here.
func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *structs.EnterpriseMeta) error {
	if nodeEntMeta == nil {
		nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
	}

	// Do not deregister ourself. This can only happen if the current leader
	// is leaving. Instead, we should allow a follower to take-over and
	// deregister us later.
	if member.Name == s.config.NodeName {
		s.logger.Warn("deregistering self should be done by follower", "name", s.config.NodeName)
		return nil
	}

	// Remove from Raft peers if this was a server
	if valid, _ := metadata.IsConsulServer(member); valid {
		if err := s.removeConsulServer(member); err != nil {
			return err
		}
	}

	// Check if the node does not exist
	state := s.fsm.State()
	_, node, err := state.GetNode(member.Name, nodeEntMeta)
	if err != nil {
		return err
	}
	if node == nil {
		// already gone from the catalog; nothing to do
		return nil
	}

	// Deregister the node
	s.logger.Info("deregistering member", "member", member.Name, "reason", reason)
	req := structs.DeregisterRequest{
		Datacenter:     s.config.Datacenter,
		Node:           member.Name,
		EnterpriseMeta: *nodeEntMeta,
	}
	_, err = s.raftApply(structs.DeregisterRequestType, &req)
	return err
}
|
2014-01-10 20:55:55 +00:00
|
|
|
|
|
|
|
// joinConsulServer is used to try to join another consul server to the
// Raft cluster via autopilot. It refuses (without error) to add a peer
// when two different nodes both claim bootstrap mode.
func (s *Server) joinConsulServer(m serf.Member, parts *metadata.Server) error {
	// Check for possibility of multiple bootstrap nodes
	if parts.Bootstrap {
		members := s.serfLAN.Members()
		for _, member := range members {
			valid, p := metadata.IsConsulServer(member)
			if valid && member.Name != m.Name && p.Bootstrap {
				s.logger.Error("Two nodes are in bootstrap mode. Only one node should be in bootstrap mode, not adding Raft peer.",
					"node_to_add", m.Name,
					"other", member.Name,
				)
				return nil
			}
		}
	}

	// We used to do a check here and prevent adding the server if the cluster size was too small (1 or 2 servers) as a means
	// of preventing the case where we may remove ourselves and cause a loss of leadership. The Autopilot AddServer function
	// will now handle simple address updates better and so long as the address doesn't conflict with another node
	// it will not require a removal but will instead just update the address. If it would require a removal of other nodes
	// due to conflicts then the logic regarding cluster sizes will kick in and prevent doing anything dangerous that could
	// cause loss of leadership.

	// get the autopilot library version of a server from the serf member
	apServer, err := s.autopilotServer(m)
	if err != nil {
		return err
	}

	// now ask autopilot to add it
	return s.autopilot.AddServer(apServer)
}
|
2014-01-10 23:05:34 +00:00
|
|
|
|
2014-01-20 23:39:07 +00:00
|
|
|
// removeConsulServer is used to try to remove a consul server that has left
|
2020-09-25 17:46:38 +00:00
|
|
|
func (s *Server) removeConsulServer(m serf.Member) error {
|
|
|
|
server, err := s.autopilotServer(m)
|
|
|
|
if err != nil || server == nil {
|
2017-02-22 20:53:32 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-09-25 17:46:38 +00:00
|
|
|
return s.autopilot.RemoveServer(server.ID)
|
2014-01-10 23:05:34 +00:00
|
|
|
}
|
2014-12-15 23:04:21 +00:00
|
|
|
|
|
|
|
// reapTombstones is invoked by the current leader to manage garbage
|
|
|
|
// collection of tombstones. When a key is deleted, we trigger a tombstone
|
|
|
|
// GC clock. Once the expiration is reached, this routine is invoked
|
|
|
|
// to clear all tombstones before this index. This must be replicated
|
|
|
|
// through Raft to ensure consistency. We do this outside the leader loop
|
|
|
|
// to avoid blocking.
|
|
|
|
func (s *Server) reapTombstones(index uint64) {
|
2017-10-04 23:43:27 +00:00
|
|
|
defer metrics.MeasureSince([]string{"leader", "reapTombstones"}, time.Now())
|
2014-12-15 23:28:56 +00:00
|
|
|
req := structs.TombstoneRequest{
|
2017-03-23 20:34:30 +00:00
|
|
|
Datacenter: s.config.Datacenter,
|
|
|
|
Op: structs.TombstoneReap,
|
|
|
|
ReapIndex: index,
|
2014-12-15 23:04:21 +00:00
|
|
|
}
|
2014-12-15 23:28:56 +00:00
|
|
|
_, err := s.raftApply(structs.TombstoneRequestType, &req)
|
2014-12-15 23:04:21 +00:00
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
s.logger.Error("failed to reap tombstones up to index",
|
|
|
|
"index", index,
|
|
|
|
"error", err,
|
|
|
|
)
|
2014-12-15 23:04:21 +00:00
|
|
|
}
|
|
|
|
}
|
2020-06-04 21:05:27 +00:00
|
|
|
|
2021-01-25 19:24:32 +00:00
|
|
|
func (s *Server) setDatacenterSupportsFederationStates() {
|
|
|
|
atomic.StoreInt32(&s.dcSupportsFederationStates, 1)
|
|
|
|
}
|
|
|
|
|
2020-06-04 21:05:27 +00:00
|
|
|
func (s *Server) DatacenterSupportsFederationStates() bool {
|
|
|
|
if atomic.LoadInt32(&s.dcSupportsFederationStates) != 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
state := serversFederationStatesInfo{
|
|
|
|
supported: true,
|
|
|
|
found: false,
|
|
|
|
}
|
|
|
|
|
2020-10-06 18:24:05 +00:00
|
|
|
// if we are in a secondary, check if they are supported in the primary dc
|
2020-06-04 21:05:27 +00:00
|
|
|
if s.config.PrimaryDatacenter != s.config.Datacenter {
|
|
|
|
s.router.CheckServers(s.config.PrimaryDatacenter, state.update)
|
|
|
|
|
|
|
|
if !state.supported || !state.found {
|
|
|
|
s.logger.Debug("federation states are not enabled in the primary dc")
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// check the servers in the local DC
|
|
|
|
s.router.CheckServers(s.config.Datacenter, state.update)
|
|
|
|
|
|
|
|
if state.supported && state.found {
|
2021-01-25 19:24:32 +00:00
|
|
|
s.setDatacenterSupportsFederationStates()
|
2020-06-04 21:05:27 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
s.logger.Debug("federation states are not enabled in this datacenter", "datacenter", s.config.Datacenter)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// serversFederationStatesInfo accumulates the outcome of scanning a set of
// servers for federation-state support; it is fed to router.CheckServers via
// its update method.
type serversFederationStatesInfo struct {
	// supported indicates whether every processed server supports federation states
	supported bool

	// found indicates that at least one server was processed
	found bool
}
|
|
|
|
|
|
|
|
func (s *serversFederationStatesInfo) update(srv *metadata.Server) bool {
|
|
|
|
if srv.Status != serf.StatusAlive && srv.Status != serf.StatusFailed {
|
|
|
|
// they are left or something so regardless we treat these servers as meeting
|
|
|
|
// the version requirement
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// mark that we processed at least one server
|
|
|
|
s.found = true
|
|
|
|
|
|
|
|
if supported, ok := srv.FeatureFlags["fs"]; ok && supported == 1 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// mark that at least one server does not support federation states
|
|
|
|
s.supported = false
|
|
|
|
|
|
|
|
// prevent continuing server evaluation
|
|
|
|
return false
|
|
|
|
}
|
2020-10-06 18:24:05 +00:00
|
|
|
|
|
|
|
func (s *Server) setDatacenterSupportsIntentionsAsConfigEntries() {
|
|
|
|
atomic.StoreInt32(&s.dcSupportsIntentionsAsConfigEntries, 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) DatacenterSupportsIntentionsAsConfigEntries() bool {
|
|
|
|
if atomic.LoadInt32(&s.dcSupportsIntentionsAsConfigEntries) != 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
state := serversIntentionsAsConfigEntriesInfo{
|
|
|
|
supported: true,
|
|
|
|
found: false,
|
|
|
|
}
|
|
|
|
|
|
|
|
// if we are in a secondary, check if they are supported in the primary dc
|
|
|
|
if s.config.PrimaryDatacenter != s.config.Datacenter {
|
|
|
|
s.router.CheckServers(s.config.PrimaryDatacenter, state.update)
|
|
|
|
|
|
|
|
if !state.supported || !state.found {
|
|
|
|
s.logger.Debug("intentions have not been migrated to config entries in the primary dc yet")
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// check the servers in the local DC
|
|
|
|
s.router.CheckServers(s.config.Datacenter, state.update)
|
|
|
|
|
|
|
|
if state.supported && state.found {
|
|
|
|
s.setDatacenterSupportsIntentionsAsConfigEntries()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
s.logger.Debug("intentions cannot be migrated to config entries in this datacenter", "datacenter", s.config.Datacenter)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// serversIntentionsAsConfigEntriesInfo accumulates the outcome of scanning a
// set of servers for service-intentions (config-entry) support; it is fed to
// router.CheckServers via its update method.
type serversIntentionsAsConfigEntriesInfo struct {
	// supported indicates whether every processed server supports intentions as config entries
	supported bool

	// found indicates that at least one server was processed
	found bool
}
|
|
|
|
|
|
|
|
func (s *serversIntentionsAsConfigEntriesInfo) update(srv *metadata.Server) bool {
|
|
|
|
if srv.Status != serf.StatusAlive && srv.Status != serf.StatusFailed {
|
|
|
|
// they are left or something so regardless we treat these servers as meeting
|
|
|
|
// the version requirement
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// mark that we processed at least one server
|
|
|
|
s.found = true
|
|
|
|
|
|
|
|
if supported, ok := srv.FeatureFlags["si"]; ok && supported == 1 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// mark that at least one server does not support service-intentions
|
|
|
|
s.supported = false
|
|
|
|
|
|
|
|
// prevent continuing server evaluation
|
|
|
|
return false
|
|
|
|
}
|