package consul

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"

	"github.com/hashicorp/consul/agent/connect/ca"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/logging"
)

const (
	// loopRateLimit is the maximum rate per second at which we can rerun CA and intention
	// replication watches.
	loopRateLimit rate.Limit = 0.2

	// retryBucketSize is the maximum number of stored rate limit attempts for looped
	// blocking query operations.
	retryBucketSize = 5
)

var (
	// maxRetryBackoff is the maximum number of seconds to wait between failed blocking
	// queries when backing off.
	maxRetryBackoff = 256
)
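
// Taken together, these values mean a looped watch can run up to
// retryBucketSize times in a quick burst and then settles to roughly one run
// every 1/loopRateLimit (5) seconds on average, while the per-error backoff in
// retryLoopBackoffHandleSuccess below is capped separately at maxRetryBackoff
// seconds.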

// startConnectLeader starts multi-dc connect leader routines.
func (s *Server) startConnectLeader() error {
	if !s.config.ConnectEnabled {
		return nil
	}

	// Start the Connect secondary DC actions if enabled.
	if s.config.Datacenter != s.config.PrimaryDatacenter {
		s.leaderRoutineManager.Start(secondaryCARootWatchRoutineName, s.caManager.secondaryCARootWatch)
	}

	s.leaderRoutineManager.Start(intermediateCertRenewWatchRoutineName, s.caManager.intermediateCertRenewalWatch)
	s.leaderRoutineManager.Start(caRootPruningRoutineName, s.runCARootPruning)
	return s.startIntentionConfigEntryMigration()
}

// stopConnectLeader stops connect-specific leader functions.
func (s *Server) stopConnectLeader() {
	s.leaderRoutineManager.Stop(intentionMigrationRoutineName)
	s.leaderRoutineManager.Stop(secondaryCARootWatchRoutineName)
	s.leaderRoutineManager.Stop(intermediateCertRenewWatchRoutineName)
	s.leaderRoutineManager.Stop(caRootPruningRoutineName)

	// If the provider implements NeedsStop, we call Stop to perform any shutdown actions.
	provider, _ := s.caManager.getCAProvider()
	if provider != nil {
		if needsStop, ok := provider.(ca.NeedsStop); ok {
			needsStop.Stop()
		}
	}
}

// createCAProvider returns a connect CA provider from the given config.
func (s *Server) createCAProvider(conf *structs.CAConfiguration) (ca.Provider, error) {
	var p ca.Provider
	switch conf.Provider {
	case structs.ConsulCAProvider:
		p = &ca.ConsulProvider{Delegate: &consulCADelegate{s}}
	case structs.VaultCAProvider:
		p = ca.NewVaultProvider()
	case structs.AWSCAProvider:
		p = &ca.AWSProvider{}
	default:
		return nil, fmt.Errorf("unknown CA provider %q", conf.Provider)
	}

	// If the provider implements NeedsLogger, we give it our logger.
	if needsLogger, ok := p.(ca.NeedsLogger); ok {
		needsLogger.SetLogger(s.logger)
	}

	return p, nil
}

func (s *Server) runCARootPruning(ctx context.Context) error {
	ticker := time.NewTicker(caRootPruneInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			if err := s.pruneCARoots(); err != nil {
				s.loggers.Named(logging.Connect).Error("error pruning CA roots", "error", err)
			}
		}
	}
}

// pruneCARoots looks for any CARoots that have been rotated out and expired,
// and removes them from the state store.
func (s *Server) pruneCARoots() error {
	if !s.config.ConnectEnabled {
		return nil
	}

	state := s.fsm.State()
	idx, roots, err := state.CARoots(nil)
	if err != nil {
		return err
	}

	_, caConf, err := state.CAConfig(nil)
	if err != nil {
		return err
	}

	common, err := caConf.GetCommonConfig()
	if err != nil {
		return err
	}

	var newRoots structs.CARoots
	for _, r := range roots {
		// Prune a root once it is inactive and has been rotated out for more
		// than twice the leaf cert TTL, at which point any leaf certificates
		// issued under it should have long since expired.
		if !r.Active && !r.RotatedOutAt.IsZero() && time.Since(r.RotatedOutAt) > common.LeafCertTTL*2 {
			s.loggers.Named(logging.Connect).Info("pruning old unused root CA", "id", r.ID)
			continue
		}
		newRoot := *r
		newRoots = append(newRoots, &newRoot)
	}

	// Return early if there's nothing to remove.
	if len(newRoots) == len(roots) {
		return nil
	}

	// Commit the new root state.
	var args structs.CARequest
	args.Op = structs.CAOpSetRoots
	args.Index = idx
	args.Roots = newRoots
	resp, err := s.raftApply(structs.ConnectCARequestType, args)
	if err != nil {
		return err
	}
	if respErr, ok := resp.(error); ok {
		return respErr
	}

	return nil
}

// retryLoopBackoff loops a given function indefinitely, backing off exponentially
// upon errors up to a maximum of maxRetryBackoff seconds.
func retryLoopBackoff(ctx context.Context, loopFn func() error, errFn func(error)) {
	retryLoopBackoffHandleSuccess(ctx, loopFn, errFn, false)
}

func retryLoopBackoffAbortOnSuccess(ctx context.Context, loopFn func() error, errFn func(error)) {
	retryLoopBackoffHandleSuccess(ctx, loopFn, errFn, true)
}

func retryLoopBackoffHandleSuccess(ctx context.Context, loopFn func() error, errFn func(error), abortOnSuccess bool) {
	var failedAttempts uint
	limiter := rate.NewLimiter(loopRateLimit, retryBucketSize)
	for {
		// Rate limit how often we run the loop.
		limiter.Wait(ctx)

		select {
		case <-ctx.Done():
			return
		default:
		}

		if (1 << failedAttempts) < maxRetryBackoff {
			failedAttempts++
		}
		retryTime := (1 << failedAttempts) * time.Second

		if err := loopFn(); err != nil {
			errFn(err)

			timer := time.NewTimer(retryTime)
			select {
			case <-ctx.Done():
				timer.Stop()
				return
			case <-timer.C:
				continue
			}
		} else if abortOnSuccess {
			return
		}

		// Reset the failed attempts after a successful run.
		failedAttempts = 0
	}
}
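
// A minimal usage sketch (hypothetical: replicateSomething and the log message
// are stand-ins, not functions defined in this package). A leader routine
// passes a single blocking-query round as loopFn and a logging callback as
// errFn, and the loop supplies rate limiting, exponential backoff on errors
// (2s, 4s, ... capped at maxRetryBackoff seconds), and shutdown via ctx:
//
//	retryLoopBackoff(ctx, func() error {
//		return s.replicateSomething(ctx)
//	}, func(err error) {
//		s.loggers.Named(logging.Connect).Error("error in replication loop", "error", err)
//	})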

// nextIndexVal computes the next index value to query for, resetting to zero
// if the index went backward.
func nextIndexVal(prevIdx, idx uint64) uint64 {
	if prevIdx > idx {
		return 0
	}
	return idx
}
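
// For example, nextIndexVal(5, 10) returns 10, while nextIndexVal(10, 5)
// returns 0, resetting the next blocking query because the index went backward.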

// halfTime returns a duration that is half the time between notBefore and
// notAfter.
func halfTime(notBefore, notAfter time.Time) time.Duration {
	interval := notAfter.Sub(notBefore)
	return interval / 2
}

// lessThanHalfTimePassed decides if half the time between notBefore and
// notAfter has passed relative to now.
// lessThanHalfTimePassed is called while holding caProviderReconfigurationLock,
// which means it must never take that lock itself or call anything that does.
func lessThanHalfTimePassed(now, notBefore, notAfter time.Time) bool {
	t := notBefore.Add(halfTime(notBefore, notAfter))
	return t.Sub(now) > 0
}
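
// As a worked example: for a certificate valid from t0 to t0+72h, halfTime
// returns 36h, so lessThanHalfTimePassed(now, t0, t0+72h) reports true while
// now is before t0+36h and false from the halfway point on.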

func (s *Server) generateCASignRequest(csr string) *structs.CASignRequest {
	return &structs.CASignRequest{
		Datacenter:   s.config.PrimaryDatacenter,
		CSR:          csr,
		WriteRequest: structs.WriteRequest{Token: s.tokens.ReplicationToken()},
	}
}