2019-06-24 18:21:51 +00:00
|
|
|
package consul
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"golang.org/x/time/rate"
|
|
|
|
|
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2020-01-28 23:50:41 +00:00
|
|
|
"github.com/hashicorp/consul/logging"
|
2019-06-24 18:21:51 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// loopRateLimit is the maximum rate per second at which we can rerun CA and intention
	// replication watches. 0.2/s means at most one rerun every 5 seconds once the
	// burst bucket is drained.
	loopRateLimit rate.Limit = 0.2

	// retryBucketSize is the maximum number of stored rate limit attempts for looped
	// blocking query operations (the rate limiter's burst size).
	retryBucketSize = 5
)
|
|
|
|
|
|
|
|
var (
	// maxRetryBackoff is the maximum number of seconds to wait between failed blocking
	// queries when backing off.
	// NOTE(review): declared as a var rather than a const — presumably so tests can
	// lower it; confirm before converting to const.
	maxRetryBackoff = 256
)
|
|
|
|
|
2020-11-12 01:05:04 +00:00
|
|
|
// startConnectLeader starts multi-dc connect leader routines.
//
// It is a no-op when Connect is disabled in the server config. Otherwise it
// starts the CA manager plus the long-running leader routines (root pruning
// and the two CA expiry metric monitors), all tied to ctx, and finally kicks
// off the intention config-entry migration, returning its error.
func (s *Server) startConnectLeader(ctx context.Context) error {
	// Nothing to do if Connect is not enabled on this server.
	if !s.config.ConnectEnabled {
		return nil
	}

	s.caManager.Start(ctx)
	s.leaderRoutineManager.Start(ctx, caRootPruningRoutineName, s.runCARootPruning)
	s.leaderRoutineManager.Start(ctx, caRootMetricRoutineName, rootCAExpiryMonitor(s).Monitor)
	s.leaderRoutineManager.Start(ctx, caSigningMetricRoutineName, signingCAExpiryMonitor(s).Monitor)

	return s.startIntentionConfigEntryMigration(ctx)
}
|
|
|
|
|
2020-11-12 01:05:04 +00:00
|
|
|
// stopConnectLeader stops connect specific leader functions.
//
// It stops the CA manager and then each leader routine started by
// startConnectLeader. Stopping is idempotent from the routine manager's
// perspective (routines that were never started are simply not running), so
// this is safe even when Connect was disabled.
func (s *Server) stopConnectLeader() {
	s.caManager.Stop()
	s.leaderRoutineManager.Stop(intentionMigrationRoutineName)
	s.leaderRoutineManager.Stop(caRootPruningRoutineName)
	s.leaderRoutineManager.Stop(caRootMetricRoutineName)
	s.leaderRoutineManager.Stop(caSigningMetricRoutineName)
}
|
|
|
|
|
2019-10-04 17:08:45 +00:00
|
|
|
func (s *Server) runCARootPruning(ctx context.Context) error {
|
2019-07-26 19:57:57 +00:00
|
|
|
ticker := time.NewTicker(caRootPruneInterval)
|
|
|
|
defer ticker.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
2019-10-04 17:08:45 +00:00
|
|
|
case <-ctx.Done():
|
|
|
|
return nil
|
2019-07-26 19:57:57 +00:00
|
|
|
case <-ticker.C:
|
|
|
|
if err := s.pruneCARoots(); err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
s.loggers.Named(logging.Connect).Error("error pruning CA roots", "error", err)
|
2019-07-26 19:57:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// pruneCARoots looks for any CARoots that have been rotated out and expired.
|
|
|
|
func (s *Server) pruneCARoots() error {
|
|
|
|
if !s.config.ConnectEnabled {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
state := s.fsm.State()
|
|
|
|
idx, roots, err := state.CARoots(nil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-08-19 18:03:03 +00:00
|
|
|
_, caConf, err := state.CAConfig(nil)
|
2019-07-26 19:57:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
common, err := caConf.GetCommonConfig()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var newRoots structs.CARoots
|
|
|
|
for _, r := range roots {
|
|
|
|
if !r.Active && !r.RotatedOutAt.IsZero() && time.Now().Sub(r.RotatedOutAt) > common.LeafCertTTL*2 {
|
2020-01-28 23:50:41 +00:00
|
|
|
s.loggers.Named(logging.Connect).Info("pruning old unused root CA", "id", r.ID)
|
2019-07-26 19:57:57 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
newRoot := *r
|
|
|
|
newRoots = append(newRoots, &newRoot)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return early if there's nothing to remove.
|
|
|
|
if len(newRoots) == len(roots) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Commit the new root state.
|
|
|
|
var args structs.CARequest
|
|
|
|
args.Op = structs.CAOpSetRoots
|
|
|
|
args.Index = idx
|
|
|
|
args.Roots = newRoots
|
2021-04-08 22:58:15 +00:00
|
|
|
_, err = s.raftApply(structs.ConnectCARequestType, args)
|
|
|
|
return err
|
2019-07-26 19:57:57 +00:00
|
|
|
}
|
|
|
|
|
2019-06-24 18:21:51 +00:00
|
|
|
// retryLoopBackoff loops a given function indefinitely, backing off exponentially
// upon errors up to a maximum of maxRetryBackoff seconds. Unlike
// retryLoopBackoffAbortOnSuccess, it keeps looping after loopFn succeeds and
// only returns when ctx is canceled.
func retryLoopBackoff(ctx context.Context, loopFn func() error, errFn func(error)) {
	retryLoopBackoffHandleSuccess(ctx, loopFn, errFn, false)
}
|
|
|
|
|
|
|
|
// retryLoopBackoffAbortOnSuccess is like retryLoopBackoff, but returns as soon
// as loopFn completes without an error instead of looping forever.
func retryLoopBackoffAbortOnSuccess(ctx context.Context, loopFn func() error, errFn func(error)) {
	retryLoopBackoffHandleSuccess(ctx, loopFn, errFn, true)
}
|
|
|
|
|
|
|
|
func retryLoopBackoffHandleSuccess(ctx context.Context, loopFn func() error, errFn func(error), abortOnSuccess bool) {
|
2019-06-24 18:21:51 +00:00
|
|
|
var failedAttempts uint
|
|
|
|
limiter := rate.NewLimiter(loopRateLimit, retryBucketSize)
|
|
|
|
for {
|
|
|
|
// Rate limit how often we run the loop
|
2020-06-24 16:36:14 +00:00
|
|
|
limiter.Wait(ctx)
|
2019-06-24 18:21:51 +00:00
|
|
|
select {
|
2020-06-24 16:36:14 +00:00
|
|
|
case <-ctx.Done():
|
2019-06-24 18:21:51 +00:00
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
if (1 << failedAttempts) < maxRetryBackoff {
|
|
|
|
failedAttempts++
|
|
|
|
}
|
|
|
|
retryTime := (1 << failedAttempts) * time.Second
|
|
|
|
|
|
|
|
if err := loopFn(); err != nil {
|
|
|
|
errFn(err)
|
2020-06-24 16:36:14 +00:00
|
|
|
|
|
|
|
timer := time.NewTimer(retryTime)
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
timer.Stop()
|
|
|
|
return
|
|
|
|
case <-timer.C:
|
|
|
|
continue
|
|
|
|
}
|
2020-09-04 09:47:16 +00:00
|
|
|
} else if abortOnSuccess {
|
|
|
|
return
|
2019-06-24 18:21:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Reset the failed attempts after a successful run.
|
|
|
|
failedAttempts = 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// nextIndexVal computes the next index value to query for, resetting to zero
// if the index went backward.
func nextIndexVal(prevIdx, idx uint64) uint64 {
	if idx >= prevIdx {
		return idx
	}
	// The index moved backward, so restart the blocking query from zero.
	return 0
}
|
|
|
|
|
2020-01-17 22:27:13 +00:00
|
|
|
// halfTime returns a duration that is half the time between notBefore and
|
|
|
|
// notAfter.
|
|
|
|
func halfTime(notBefore, notAfter time.Time) time.Duration {
|
|
|
|
interval := notAfter.Sub(notBefore)
|
|
|
|
return interval / 2
|
|
|
|
}
|
|
|
|
|
|
|
|
// lessThanHalfTimePassed decides if half the time between notBefore and
|
|
|
|
// notAfter has passed relative to now.
|
|
|
|
// lessThanHalfTimePassed is being called while holding caProviderReconfigurationLock
|
|
|
|
// which means it must never take that lock itself or call anything that does.
|
|
|
|
func lessThanHalfTimePassed(now, notBefore, notAfter time.Time) bool {
|
|
|
|
t := notBefore.Add(halfTime(notBefore, notAfter))
|
|
|
|
return t.Sub(now) > 0
|
|
|
|
}
|