// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package vault

import (
	"context"
	"crypto/ecdsa"
	"crypto/x509"
	"errors"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/sdk/helper/certutil"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/jsonutil"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
	"github.com/hashicorp/vault/vault/seal"
	"github.com/oklog/run"
)

const (
	// lockRetryInterval is the interval we re-attempt to acquire the
	// HA lock if an error is encountered
	lockRetryInterval = 10 * time.Second

	// leaderCheckInterval is how often a standby checks for a new leader
	leaderCheckInterval = 2500 * time.Millisecond

	// keyRotateCheckInterval is how often a standby checks for a key
	// rotation taking place.
	keyRotateCheckInterval = 10 * time.Second

	// leaderPrefixCleanDelay is how long to wait between deletions
	// of orphaned leader keys, to prevent slamming the backend.
	leaderPrefixCleanDelay = 200 * time.Millisecond
)

var (
	addEnterpriseHaActors func(*Core, *run.Group) chan func()            = addEnterpriseHaActorsNoop
	interruptPerfStandby  func(chan func(), chan struct{}) chan struct{} = interruptPerfStandbyNoop
)

func addEnterpriseHaActorsNoop(*Core, *run.Group) chan func() { return nil }

func interruptPerfStandbyNoop(chan func(), chan struct{}) chan struct{} {
	return make(chan struct{})
}
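
// addEnterpriseHaActors and interruptPerfStandby are declared as package-level
// function variables, defaulting to the no-op implementations above; this lets
// enterprise builds swap in their own HA actors without changing the call
// sites in runStandby and waitForLeadership.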

// Standby checks if the Vault is in standby mode
func (c *Core) Standby() (bool, error) {
	c.stateLock.RLock()
	standby := c.standby
	c.stateLock.RUnlock()
	return standby, nil
}

// PerfStandby checks if the Vault is a performance standby.
// This function cannot be used during request handling
// because doing so can deadlock on the statelock.
func (c *Core) PerfStandby() bool {
	c.stateLock.RLock()
	perfStandby := c.perfStandby
	c.stateLock.RUnlock()
	return perfStandby
}

// ActiveTime returns the time at which this core last became the active node,
// or the zero time if it has not been active.
func (c *Core) ActiveTime() time.Time {
	c.stateLock.RLock()
	activeTime := c.activeTime
	c.stateLock.RUnlock()
	return activeTime
}

// StandbyStates is meant as a way to avoid some extra locking on the very
// common sys/health check.
func (c *Core) StandbyStates() (standby, perfStandby bool) {
	c.stateLock.RLock()
	standby = c.standby
	perfStandby = c.perfStandby
	c.stateLock.RUnlock()
	return
}

// getHAMembers retrieves cluster membership that doesn't depend on raft. This
// should only ever be called by the active node.
func (c *Core) getHAMembers() ([]HAStatusNode, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	leader := HAStatusNode{
		Hostname:       hostname,
		APIAddress:     c.redirectAddr,
		ClusterAddress: c.ClusterAddr(),
		ActiveNode:     true,
		Version:        c.effectiveSDKVersion,
	}

	if rb := c.getRaftBackend(); rb != nil {
		leader.UpgradeVersion = rb.EffectiveVersion()
		leader.RedundancyZone = rb.RedundancyZone()
	}

	nodes := []HAStatusNode{leader}

	for _, peerNode := range c.GetHAPeerNodesCached() {
		lastEcho := peerNode.LastEcho
		nodes = append(nodes, HAStatusNode{
			Hostname:       peerNode.Hostname,
			APIAddress:     peerNode.APIAddress,
			ClusterAddress: peerNode.ClusterAddress,
			LastEcho:       &lastEcho,
			Version:        peerNode.Version,
			UpgradeVersion: peerNode.UpgradeVersion,
			RedundancyZone: peerNode.RedundancyZone,
		})
	}

	sort.Slice(nodes, func(i, j int) bool {
		return nodes[i].APIAddress < nodes[j].APIAddress
	})

	return nodes, nil
}

// Leader is used to get information about the current active leader in relation to the current node (core).
// It utilizes a state lock on the Core by attempting to acquire a read lock. Care should be taken not to
// call this method if a read lock on this Core's state lock is currently held, as this can cause deadlock.
// e.g. if called from within request handling.
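//
// A typical standby-side use, mirroring periodicLeaderRefresh below
// (illustrative sketch only):
//
//	isLeader, leaderAddr, clusterAddr, err := c.Leader()
//	if err == nil && !isLeader {
//		// forward or redirect client requests to leaderAddr / clusterAddr
//	}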
func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) {
	// Check if HA enabled. We don't need the lock for this check as it's set
	// on startup and never modified
	if c.ha == nil {
		return false, "", "", ErrHANotEnabled
	}

	// Check if sealed
	if c.Sealed() {
		return false, "", "", consts.ErrSealed
	}

	c.stateLock.RLock()

	// Check if we are the leader
	if !c.standby {
		c.stateLock.RUnlock()
		return true, c.redirectAddr, c.ClusterAddr(), nil
	}

	// Initialize a lock
	lock, err := c.ha.LockWith(CoreLockPath, "read")
	if err != nil {
		c.stateLock.RUnlock()
		return false, "", "", err
	}

	// Read the value
	held, leaderUUID, err := lock.Value()
	if err != nil {
		c.stateLock.RUnlock()
		return false, "", "", err
	}
	if !held {
		c.stateLock.RUnlock()
		return false, "", "", nil
	}

	var localLeaderUUID, localRedirectAddr, localClusterAddr string
	clusterLeaderParams := c.clusterLeaderParams.Load().(*ClusterLeaderParams)
	if clusterLeaderParams != nil {
		localLeaderUUID = clusterLeaderParams.LeaderUUID
		localRedirectAddr = clusterLeaderParams.LeaderRedirectAddr
		localClusterAddr = clusterLeaderParams.LeaderClusterAddr
	}

	// If the leader hasn't changed, return the cached value; nothing changes
	// mid-leadership, and the barrier caches anyway
	if leaderUUID == localLeaderUUID && localRedirectAddr != "" {
		c.stateLock.RUnlock()
		return false, localRedirectAddr, localClusterAddr, nil
	}

	c.logger.Trace("found new active node information, refreshing")

	defer c.stateLock.RUnlock()
	c.leaderParamsLock.Lock()
	defer c.leaderParamsLock.Unlock()

	// Validate base conditions again
	clusterLeaderParams = c.clusterLeaderParams.Load().(*ClusterLeaderParams)
	if clusterLeaderParams != nil {
		localLeaderUUID = clusterLeaderParams.LeaderUUID
		localRedirectAddr = clusterLeaderParams.LeaderRedirectAddr
		localClusterAddr = clusterLeaderParams.LeaderClusterAddr
	} else {
		localLeaderUUID = ""
		localRedirectAddr = ""
		localClusterAddr = ""
	}

	if leaderUUID == localLeaderUUID && localRedirectAddr != "" {
		return false, localRedirectAddr, localClusterAddr, nil
	}

	key := coreLeaderPrefix + leaderUUID
	// Use background because postUnseal isn't run on standby
	entry, err := c.barrier.Get(context.Background(), key)
	if err != nil {
		return false, "", "", err
	}
	if entry == nil {
		return false, "", "", nil
	}

	var oldAdv bool

	var adv activeAdvertisement
	err = jsonutil.DecodeJSON(entry.Value, &adv)
	if err != nil {
		// Fall back to pre-struct handling
		adv.RedirectAddr = string(entry.Value)
		c.logger.Debug("parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr)
		oldAdv = true
	}

	// At the top of this function we return early when we're the active node.
	// If we're not the active node, and there's a stale advertisement pointing
	// to ourselves, there's no point in paying any attention to it. And by
	// disregarding it, we can avoid a panic in raft tests using the Inmem network
	// layer when we try to connect back to ourselves.
	if adv.ClusterAddr == c.ClusterAddr() && adv.RedirectAddr == c.redirectAddr && c.getRaftBackend() != nil {
		return false, "", "", nil
	}

	if !oldAdv {
		c.logger.Debug("parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr)

		// Ensure we are using current values
		err = c.loadLocalClusterTLS(adv)
		if err != nil {
			return false, "", "", err
		}

		// This will ensure that we both have a connection at the ready and that
		// the address is the current known value
		// Since this is standby, we don't use the active context. Later we may
		// use a process-scoped context
		err = c.refreshRequestForwardingConnection(context.Background(), adv.ClusterAddr)
		if err != nil {
			return false, "", "", err
		}
	}

	// Don't set these until everything has been parsed successfully or we'll
	// never try again
	c.clusterLeaderParams.Store(&ClusterLeaderParams{
		LeaderUUID:         leaderUUID,
		LeaderRedirectAddr: adv.RedirectAddr,
		LeaderClusterAddr:  adv.ClusterAddr,
	})

	return false, adv.RedirectAddr, adv.ClusterAddr, nil
}

// StepDown is used to step down from leadership
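// on this node. It validates and audit-logs the request, then queues the
// step-down on c.manualStepDownCh; the actual transition back to standby is
// handled by waitForLeadership, which watches that channel.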
func (c *Core) StepDown(httpCtx context.Context, req *logical.Request) (retErr error) {
	defer metrics.MeasureSince([]string{"core", "step_down"}, time.Now())

	if req == nil {
		return errors.New("nil request to step-down")
	}

	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	if c.Sealed() {
		return nil
	}
	if c.ha == nil || c.standby {
		return nil
	}

	ctx, cancel := context.WithCancel(namespace.RootContext(nil))
	defer cancel()

	go func() {
		select {
		case <-ctx.Done():
		case <-httpCtx.Done():
			cancel()
		}
	}()

	err := c.PopulateTokenEntry(ctx, req)
	if err != nil {
		if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
			return logical.ErrPermissionDenied
		}
		return logical.ErrInvalidRequest
	}
	acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req)
	if err != nil {
		return err
	}

	// Audit-log the request before going any further
	auth := &logical.Auth{
		ClientToken: req.ClientToken,
		Accessor:    req.ClientTokenAccessor,
	}
	if te != nil {
		auth.IdentityPolicies = identityPolicies[te.NamespaceID]
		delete(identityPolicies, te.NamespaceID)
		auth.ExternalNamespacePolicies = identityPolicies
		auth.TokenPolicies = te.Policies
		auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...)
		auth.Metadata = te.Meta
		auth.DisplayName = te.DisplayName
		auth.EntityID = te.EntityID
		auth.TokenType = te.Type
	}

	logInput := &logical.LogInput{
		Auth:    auth,
		Request: req,
	}
	if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
		c.logger.Error("failed to audit request", "request_path", req.Path, "error", err)
		return errors.New("failed to audit request, cannot continue")
	}

	if entity != nil && entity.Disabled {
		c.logger.Warn("permission denied as the entity on the token is disabled")
		return logical.ErrPermissionDenied
	}

	if te != nil && te.EntityID != "" && entity == nil {
		c.logger.Warn("permission denied as the entity on the token is invalid")
		return logical.ErrPermissionDenied
	}

	// Attempt to use the token (decrement num_uses)
	if te != nil {
		te, err = c.tokenStore.UseToken(ctx, te)
		if err != nil {
			c.logger.Error("failed to use token", "error", err)
			return ErrInternalError
		}
		if te == nil {
			// Token has been revoked
			return logical.ErrPermissionDenied
		}
	}

	// Verify that this operation is allowed
	authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
		RootPrivsRequired: true,
	})
	if !authResults.Allowed {
		retErr = multierror.Append(retErr, authResults.Error)
		if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
			retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
		}
		return retErr
	}

	if te != nil && te.NumUses == tokenRevocationPending {
		// Token needs to be revoked. We do this immediately here because
		// we won't have a token store after sealing.
		leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te)
		if err == nil {
			err = c.expiration.Revoke(c.activeContext, leaseID)
		}
		if err != nil {
			c.logger.Error("token needed revocation before step-down but failed to revoke", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
		}
	}

	select {
	case c.manualStepDownCh <- struct{}{}:
	default:
		c.logger.Warn("manual step-down operation already queued")
	}

	return retErr
}

// runStandby is a long running process that manages a number of the HA
// subsystems.
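// Each subsystem (the key-rotation checker, the periodic leader refresh, the
// metrics loop, and the leadership wait loop itself) is registered as an actor
// in a run.Group; closing stopCh stops the first actor, which in turn causes
// the group to interrupt and wait for all of the others before returning.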
func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) {
	defer close(doneCh)
	defer close(manualStepDownCh)
	c.logger.Info("entering standby mode")

	var g run.Group
	newLeaderCh := addEnterpriseHaActors(c, &g)
	{
		// This will cause all the other actors to close when the stop channel
		// is closed.
		g.Add(func() error {
			<-stopCh
			return nil
		}, func(error) {})
	}
	{
		// Monitor for key rotations
		keyRotateStop := make(chan struct{})

		g.Add(func() error {
			c.periodicCheckKeyUpgrades(context.Background(), keyRotateStop)
			return nil
		}, func(error) {
			close(keyRotateStop)
			c.logger.Debug("shutting down periodic key rotation checker")
		})
	}
	{
		// Monitor for new leadership
		checkLeaderStop := make(chan struct{})

		g.Add(func() error {
			c.periodicLeaderRefresh(newLeaderCh, checkLeaderStop)
			return nil
		}, func(error) {
			close(checkLeaderStop)
			c.logger.Debug("shutting down periodic leader refresh")
		})
	}
	{
		metricsStop := make(chan struct{})

		g.Add(func() error {
			c.metricsLoop(metricsStop)
			return nil
		}, func(error) {
			close(metricsStop)
			c.logger.Debug("shutting down periodic metrics")
		})
	}
	{
		// Wait for leadership
		leaderStopCh := make(chan struct{})

		g.Add(func() error {
			c.waitForLeadership(newLeaderCh, manualStepDownCh, leaderStopCh)
			return nil
		}, func(error) {
			close(leaderStopCh)
			c.logger.Debug("shutting down leader elections")
		})
	}

	// Start all the actors
	g.Run()
}

// waitForLeadership is a long running routine that is used when an HA backend
// is enabled. It waits until we are leader and switches this Vault to
// active.
func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stopCh chan struct{}) {
	var manualStepDown bool
	firstIteration := true
	for {
		// Check for a shutdown
		select {
		case <-stopCh:
			c.logger.Debug("stop channel triggered in runStandby")
			return
		default:
			// If we've just stepped down, we could instantly grab the lock
			// again. Give the other nodes a chance.
			if manualStepDown {
				time.Sleep(manualStepDownSleepPeriod)
				manualStepDown = false
			} else if !firstIteration {
				// If we restarted the for loop due to an error, wait a second
				// so that we don't busy loop if the error persists.
				time.Sleep(1 * time.Second)
			}
		}
		firstIteration = false

		// Create a lock
		uuid, err := uuid.GenerateUUID()
		if err != nil {
			c.logger.Error("failed to generate uuid", "error", err)
			continue
		}
		lock, err := c.ha.LockWith(CoreLockPath, uuid)
		if err != nil {
			c.logger.Error("failed to create lock", "error", err)
			continue
		}

		// Attempt the acquisition
		leaderLostCh := c.acquireLock(lock, stopCh)

		// Bail if we are being shut down
		if leaderLostCh == nil {
			return
		}

		if atomic.LoadUint32(c.neverBecomeActive) == 1 {
			c.heldHALock = nil
			lock.Unlock()
			c.logger.Info("marked never become active, giving up active state")
			continue
		}

		c.logger.Info("acquired lock, enabling active operation")

		// This is used later to log a metrics event; this can be helpful to
		// detect flapping
		activeTime := time.Now()

		continueCh := interruptPerfStandby(newLeaderCh, stopCh)

		// Grab the statelock or stop
		l := newLockGrabber(c.stateLock.Lock, c.stateLock.Unlock, stopCh)
		go l.grab()
		if stopped := l.lockOrStop(); stopped {
			lock.Unlock()
			close(continueCh)
			metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
			return
		}

		if c.Sealed() {
			c.logger.Warn("grabbed HA lock but already sealed, exiting")
			lock.Unlock()
			close(continueCh)
			c.stateLock.Unlock()
			metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
			return
		}

		// Store the lock so that we can manually clear it later if needed
		c.heldHALock = lock

		// Create the active context
		activeCtx, activeCtxCancel := context.WithCancel(namespace.RootContext(nil))
		c.activeContext = activeCtx
		c.activeContextCancelFunc.Store(activeCtxCancel)

		// Perform seal migration
		if err := c.migrateSeal(c.activeContext); err != nil {
			c.logger.Error("seal migration error", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			c.heldHALock = nil
			lock.Unlock()
			close(continueCh)
			c.stateLock.Unlock()
			return
		}

		// This block is used to wipe barrier/seal state and verify that
		// everything is sane. If we have no sanity in the barrier, we actually
		// seal, as there's little we can do.
		{
			c.seal.SetBarrierConfig(activeCtx, nil)
			if c.seal.RecoveryKeySupported() {
				c.seal.SetRecoveryConfig(activeCtx, nil)
			}

			if err := c.performKeyUpgrades(activeCtx); err != nil {
				c.logger.Error("error performing key upgrades", "error", err)

				// If we fail due to anything other than a context canceled
				// error we should shut down, as we may have the incorrect keys.
				if !strings.Contains(err.Error(), context.Canceled.Error()) {
					// We call this in a goroutine so that we can give up the
					// statelock and have this shut us down; sealInternal has a
					// workflow where it watches for the stopCh to close so we want
					// to return from here
					go c.Shutdown()
				}

				c.heldHALock = nil
				lock.Unlock()
				close(continueCh)
				c.stateLock.Unlock()
				metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)

				// If we are shutting down we should return from this function,
				// otherwise continue
				if !strings.Contains(err.Error(), context.Canceled.Error()) {
					continue
				} else {
					return
				}
			}
		}

		{
			// Clear previous local cluster cert info so we generate new. Since the
			// UUID will have changed, standbys will know to look for new info
			c.localClusterParsedCert.Store((*x509.Certificate)(nil))
			c.localClusterCert.Store(([]byte)(nil))
			c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))

			if err := c.setupCluster(activeCtx); err != nil {
				c.heldHALock = nil
				lock.Unlock()
				close(continueCh)
				c.stateLock.Unlock()
				c.logger.Error("cluster setup failed", "error", err)
				metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
				continue
			}

		}
		// Advertise as leader
		if err := c.advertiseLeader(activeCtx, uuid, leaderLostCh); err != nil {
			c.heldHALock = nil
			lock.Unlock()
			close(continueCh)
			c.stateLock.Unlock()
			c.logger.Error("leader advertisement setup failed", "error", err)
			metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
			continue
		}

		// Attempt the post-unseal process
		err = c.postUnseal(activeCtx, activeCtxCancel, standardUnsealStrategy{})
		if err == nil {
			c.standby = false
			c.leaderUUID = uuid
			c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 1, nil)
		}

		close(continueCh)
		c.stateLock.Unlock()

		// Handle a failure to unseal
		if err != nil {
			c.logger.Error("post-unseal setup failed", "error", err)
			lock.Unlock()
			metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
			continue
		}

		// Monitor a loss of leadership
		select {
		case <-leaderLostCh:
			c.logger.Warn("leadership lost, stopping active operation")
		case <-stopCh:
		case <-manualStepDownCh:
			manualStepDown = true
			c.logger.Warn("stepping down from active operation to standby")
		}

		// Stop Active Duty
		{
			// Spawn this in a goroutine so we can cancel the context and
			// unblock any inflight requests that are holding the statelock.
			go func() {
				timer := time.NewTimer(DefaultMaxRequestDuration)
				select {
				case <-activeCtx.Done():
					timer.Stop()
				// Attempt to drain any inflight requests
				case <-timer.C:
					activeCtxCancel()
				}
			}()

			// Grab lock if we are not stopped
			l := newLockGrabber(c.stateLock.Lock, c.stateLock.Unlock, stopCh)
			go l.grab()
			stopped := l.lockOrStop()

			// Cancel the context in case the above goroutine hasn't done it
			// yet
			activeCtxCancel()
			metrics.MeasureSince([]string{"core", "leadership_lost"}, activeTime)

			// Mark as standby
			c.standby = true
			c.leaderUUID = ""
			c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 0, nil)

			// Seal
			if err := c.preSeal(); err != nil {
				c.logger.Error("pre-seal teardown failed", "error", err)
			}

			// If we are not meant to keep the HA lock, clear it
			if atomic.LoadUint32(c.keepHALockOnStepDown) == 0 {
				if err := c.clearLeader(uuid); err != nil {
					c.logger.Error("clearing leader advertisement failed", "error", err)
				}

				if err := c.heldHALock.Unlock(); err != nil {
					c.logger.Error("unlocking HA lock failed", "error", err)
				}
				c.heldHALock = nil
			}

			// If we are stopped return, otherwise unlock the statelock
			if stopped {
				return
			}
			c.stateLock.Unlock()
		}
	}
}

// grabLockOrStop returns stopped=false if the lock is acquired. Returns
// stopped=true if the lock is not acquired, because stopCh was closed. If the
// lock was acquired (stopped=false) then it's up to the caller to unlock. If
// the lock was not acquired (stopped=true), the caller does not hold the lock and
// should not call unlock.
// It's probably better to inline the body of grabLockOrStop into your function
// instead of calling it. If multiple functions call grabLockOrStop, when a deadlock
// occurs, we have no way of knowing who launched the grab goroutine, complicating
// investigation.
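//
// The intended call pattern, mirroring the call sites in waitForLeadership
// above, is roughly:
//
//	l := newLockGrabber(c.stateLock.Lock, c.stateLock.Unlock, stopCh)
//	go l.grab()
//	if stopped := l.lockOrStop(); stopped {
//		return
//	}
//	// ... the caller now holds the lock and is responsible for unlocking it.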
func grabLockOrStop(lockFunc, unlockFunc func(), stopCh chan struct{}) (stopped bool) {
	l := newLockGrabber(lockFunc, unlockFunc, stopCh)
	go l.grab()
	return l.lockOrStop()
}

type lockGrabber struct {
	// stopCh provides a way to interrupt the grab-or-stop
	stopCh chan struct{}
	// doneCh is closed when the child goroutine is done.
	doneCh     chan struct{}
	lockFunc   func()
	unlockFunc func()
	// lock protects these variables which are shared by parent and child.
	lock          sync.Mutex
	parentWaiting bool
	locked        bool
}

func newLockGrabber(lockFunc, unlockFunc func(), stopCh chan struct{}) *lockGrabber {
	return &lockGrabber{
		doneCh:        make(chan struct{}),
		lockFunc:      lockFunc,
		unlockFunc:    unlockFunc,
		parentWaiting: true,
		stopCh:        stopCh,
	}
}

// lockOrStop waits for grab to get a lock or give up, see grabLockOrStop for how to use it.
func (l *lockGrabber) lockOrStop() (stopped bool) {
	stop := false
	select {
	case <-l.stopCh:
		stop = true
	case <-l.doneCh:
	}

	// The child goroutine may not have acquired the lock yet.
	l.lock.Lock()
	defer l.lock.Unlock()
	l.parentWaiting = false
	if stop {
		if l.locked {
			l.unlockFunc()
		}
		return true
	}
	return false
}

// grab tries to get a lock, see grabLockOrStop for how to use it.
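// It coordinates with lockOrStop through l.lock: if the parent has already
// stopped waiting by the time the lock is acquired, grab releases the lock
// immediately so it is never leaked; otherwise it records locked=true and
// leaves the unlock decision to lockOrStop.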
func (l *lockGrabber) grab() {
	defer close(l.doneCh)
	l.lockFunc()

	// The parent goroutine may or may not be waiting.
	l.lock.Lock()
	defer l.lock.Unlock()
	if !l.parentWaiting {
		l.unlockFunc()
	} else {
		l.locked = true
	}
}

// This checks the leader periodically to ensure that we switch RPC to a new
// leader pretty quickly. There is logic in Leader() already to not make this
// onerous and avoid more traffic than needed, so we just call that and ignore
// the result.
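// A small atomic counter (opCount) ensures that at most one Leader() probe is
// in flight at a time; ticks that arrive while a probe is still running are
// simply dropped.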
func (c *Core) periodicLeaderRefresh(newLeaderCh chan func(), stopCh chan struct{}) {
	opCount := new(int32)

	clusterAddr := ""
	for {
		timer := time.NewTimer(leaderCheckInterval)
		select {
		case <-timer.C:
			count := atomic.AddInt32(opCount, 1)
			if count > 1 {
				atomic.AddInt32(opCount, -1)
				continue
			}
			// We do this in a goroutine because otherwise if this refresh is
			// called while we're shutting down the call to Leader() can
			// deadlock, which then means stopCh can never be seen and we can
			// block shutdown
			go func() {
				// Bind locally, as the race detector is tripping here
				lopCount := opCount
				isLeader, _, newClusterAddr, _ := c.Leader()

				// If we are the leader reset the clusterAddr since the next
				// failover might go to the node that was previously active.
				if isLeader {
					clusterAddr = ""
				}

				if !isLeader && newClusterAddr != clusterAddr && newLeaderCh != nil {
					select {
					case newLeaderCh <- nil:
						c.logger.Debug("new leader found, triggering new leader channel")
						clusterAddr = newClusterAddr
					default:
						c.logger.Debug("new leader found, but still processing previous leader change")
					}
				}
				atomic.AddInt32(lopCount, -1)
			}()
		case <-stopCh:
			timer.Stop()
			return
		}
	}
}

// periodicCheckKeyUpgrades is used to watch for key rotation events as a standby.
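// In addition to barrier key rotations, it watches for the replication poison
// pill and, when raft storage is in use, for raft TLS key rotation.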
func (c *Core) periodicCheckKeyUpgrades(ctx context.Context, stopCh chan struct{}) {
	raftBackend := c.getRaftBackend()
	isRaft := raftBackend != nil

	opCount := new(int32)
	for {
		timer := time.NewTimer(keyRotateCheckInterval)
		select {
		case <-timer.C:
			count := atomic.AddInt32(opCount, 1)
			if count > 1 {
				atomic.AddInt32(opCount, -1)
				continue
			}

			go func() {
				// Bind locally, as the race detector is tripping here
				lopCount := opCount

				// Only check if we are a standby
				c.stateLock.RLock()
				standby := c.standby
				c.stateLock.RUnlock()
				if !standby {
					atomic.AddInt32(lopCount, -1)
					return
				}

				// Check for a poison pill. If we can read it, it means we have stale
				// keys (e.g. from replication being activated) and we need to seal to
				// be unsealed again.
				entry, _ := c.barrier.Get(ctx, poisonPillPath)
				entryDR, _ := c.barrier.Get(ctx, poisonPillDRPath)
				if (entry != nil && len(entry.Value) > 0) || (entryDR != nil && len(entryDR.Value) > 0) {
					c.logger.Warn("encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
					// If we are using raft storage we do not want to shut down
					// raft during replication secondary enablement. This will
					// allow us to keep making progress on the raft log.
					go c.sealInternalWithOptions(true, false, !isRaft)
					atomic.AddInt32(lopCount, -1)
					return
				}

				if err := c.checkKeyUpgrades(ctx); err != nil {
					c.logger.Error("key rotation periodic upgrade check failed", "error", err)
				}

				if isRaft {
					hasState, err := raftBackend.HasState()
					if err != nil {
						c.logger.Error("could not check raft state", "error", err)
					}

					if raftBackend.Initialized() && hasState {
						if err := c.checkRaftTLSKeyUpgrades(ctx); err != nil {
							c.logger.Error("raft tls periodic upgrade check failed", "error", err)
						}
					}
				}

				atomic.AddInt32(lopCount, -1)
				return
			}()
		case <-stopCh:
			timer.Stop()
			return
		}
	}
}

// checkKeyUpgrades is used to check if there have been any key rotations
// and if there is a chain of upgrades available
func (c *Core) checkKeyUpgrades(ctx context.Context) error {
	for {
		// Check for an upgrade
		didUpgrade, newTerm, err := c.barrier.CheckUpgrade(ctx)
		if err != nil {
			return err
		}

		// Nothing to do if no upgrade
		if !didUpgrade {
			break
		}
		if c.logger.IsInfo() {
			c.logger.Info("upgraded to new key term", "term", newTerm)
		}
	}
	return nil
}

// reloadRootKey reloads the barrier's root key from storage.
func (c *Core) reloadRootKey(ctx context.Context) error {
	if err := c.barrier.ReloadRootKey(ctx); err != nil {
		return fmt.Errorf("error reloading root key: %w", err)
	}
	return nil
}

// reloadShamirKey re-reads the shamir seal key material from storage (the
// shamir KEK, or the keyring root key for legacy seals) and installs it on the
// seal's shamir wrapper.
func (c *Core) reloadShamirKey(ctx context.Context) error {
	_ = c.seal.SetBarrierConfig(ctx, nil)
	if cfg, _ := c.seal.BarrierConfig(ctx); cfg == nil {
		return nil
	}
	var shamirKey []byte
	switch c.seal.StoredKeysSupported() {
	case seal.StoredKeysSupportedGeneric:
		return nil
	case seal.StoredKeysSupportedShamirRoot:
		entry, err := c.barrier.Get(ctx, shamirKekPath)
		if err != nil {
			return err
		}
		if entry == nil {
			return nil
		}
		shamirKey = entry.Value
	case seal.StoredKeysNotSupported:
		keyring, err := c.barrier.Keyring()
		if err != nil {
			return fmt.Errorf("failed to update seal access: %w", err)
		}
		shamirKey = keyring.rootKey
	}
	shamirWrapper, err := c.seal.GetShamirWrapper()
	if err != nil {
		return err
	}
	return shamirWrapper.SetAesGcmKeyBytes(shamirKey)
}

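// performKeyUpgrades runs when a node takes over as the active node: it applies
// any pending barrier key upgrades, reloads the root key, keyring, and shamir
// KEK from storage, and schedules cleanup of the consumed upgrade entries.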
func (c *Core) performKeyUpgrades(ctx context.Context) error {
	if err := c.checkKeyUpgrades(ctx); err != nil {
		return fmt.Errorf("error checking for key upgrades: %w", err)
	}

	if err := c.reloadRootKey(ctx); err != nil {
		return fmt.Errorf("error reloading root key: %w", err)
	}

	if err := c.barrier.ReloadKeyring(ctx); err != nil {
		return fmt.Errorf("error reloading keyring: %w", err)
	}

	if err := c.reloadShamirKey(ctx); err != nil {
		return fmt.Errorf("error reloading shamir kek key: %w", err)
	}

	if err := c.scheduleUpgradeCleanup(ctx); err != nil {
		return fmt.Errorf("error scheduling upgrade cleanup: %w", err)
	}

	return nil
}

// scheduleUpgradeCleanup is used to ensure that all the upgrade paths
// are cleaned up in a timely manner if a leader failover takes place
func (c *Core) scheduleUpgradeCleanup(ctx context.Context) error {
	// List the upgrades
	upgrades, err := c.barrier.List(ctx, keyringUpgradePrefix)
	if err != nil {
		return fmt.Errorf("failed to list upgrades: %w", err)
	}

	// Nothing to do if no upgrades
	if len(upgrades) == 0 {
		return nil
	}

	// Schedule cleanup for all of them
	time.AfterFunc(c.KeyRotateGracePeriod(), func() {
		sealed, err := c.barrier.Sealed()
		if err != nil {
			c.logger.Warn("failed to check barrier status at upgrade cleanup time")
			return
		}
		if sealed {
			c.logger.Warn("barrier sealed at upgrade cleanup time")
			return
		}
		for _, upgrade := range upgrades {
			path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
			if err := c.barrier.Delete(ctx, path); err != nil {
				c.logger.Error("failed to cleanup upgrade", "path", path, "error", err)
			}
		}
	})
	return nil
}

// acquireLock blocks until the lock is acquired, returning the leaderLostCh
func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} {
	for {
		// Attempt lock acquisition
		leaderLostCh, err := lock.Lock(stopCh)
		if err == nil {
			return leaderLostCh
		}

		// Retry the acquisition
		c.logger.Error("failed to acquire lock", "error", err)
		timer := time.NewTimer(lockRetryInterval)
		select {
		case <-timer.C:
		case <-stopCh:
			timer.Stop()
			return nil
		}
	}
}

// advertiseLeader is used to advertise the current node as leader
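// It writes an activeAdvertisement entry (the redirect and cluster addresses
// plus the local cluster certificate and key parameters) under
// coreLeaderPrefix+uuid, starts cleanLeaderPrefix to remove stale entries, and
// notifies any configured service registration of the active-state change.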
func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) error {
	if leaderLostCh != nil {
		go c.cleanLeaderPrefix(ctx, uuid, leaderLostCh)
	}

	var key *ecdsa.PrivateKey
	switch c.localClusterPrivateKey.Load().(type) {
	case *ecdsa.PrivateKey:
		key = c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey)
	default:
		c.logger.Error("unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey.Load()))
		return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey.Load())
	}

	keyParams := &certutil.ClusterKeyParams{
		Type: corePrivateKeyTypeP521,
		X:    key.X,
		Y:    key.Y,
		D:    key.D,
	}

	locCert := c.localClusterCert.Load().([]byte)
	localCert := make([]byte, len(locCert))
	copy(localCert, locCert)
	adv := &activeAdvertisement{
		RedirectAddr:     c.redirectAddr,
		ClusterAddr:      c.ClusterAddr(),
		ClusterCert:      localCert,
		ClusterKeyParams: keyParams,
	}
	val, err := jsonutil.EncodeJSON(adv)
	if err != nil {
		return err
	}
	ent := &logical.StorageEntry{
		Key:   coreLeaderPrefix + uuid,
		Value: val,
	}
	err = c.barrier.Put(ctx, ent)
	if err != nil {
		return err
	}

	if c.serviceRegistration != nil {
		if err := c.serviceRegistration.NotifyActiveStateChange(true); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify active status", "error", err)
			}
		}
	}
	return nil
}

// cleanLeaderPrefix removes stale leader advertisement entries, pausing
// between deletions so we don't overwhelm the storage backend, and stops as
// soon as leadership is lost.
func (c *Core) cleanLeaderPrefix(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) {
	keys, err := c.barrier.List(ctx, coreLeaderPrefix)
	if err != nil {
		c.logger.Error("failed to list entries in core/leader", "error", err)
		return
	}
	for len(keys) > 0 {
		timer := time.NewTimer(leaderPrefixCleanDelay)
		select {
		case <-timer.C:
			if keys[0] != uuid {
				c.barrier.Delete(ctx, coreLeaderPrefix+keys[0])
			}
			keys = keys[1:]
		case <-leaderLostCh:
			timer.Stop()
			return
		}
	}
}

// clearLeader is used to clear our leadership entry
func (c *Core) clearLeader(uuid string) error {
	key := coreLeaderPrefix + uuid
	err := c.barrier.Delete(context.Background(), key)

	// Advertise ourselves as a standby
	if c.serviceRegistration != nil {
		if err := c.serviceRegistration.NotifyActiveStateChange(false); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify standby status", "error", err)
			}
		}
	}

	return err
}

// SetNeverBecomeActive controls whether this node is allowed to take over as
// the active node; when set, waitForLeadership releases any HA lock it
// acquires instead of becoming active.
func (c *Core) SetNeverBecomeActive(on bool) {
	if on {
		atomic.StoreUint32(c.neverBecomeActive, 1)
	} else {
		atomic.StoreUint32(c.neverBecomeActive, 0)
	}
}