api/seal-status: fix deadlock when namespace is set on seal-status calls (#23861) (#23879)

* api/seal-status: fix deadlock when namespace is set on seal-status calls

* changelog

Co-authored-by: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com>
Authored by hc-github-team-secure-vault-core on 2023-10-27 10:47:12 -04:00; committed by GitHub
Parent: 0ca886beaf
Commit: bc19a6d305
9 changed files with 43 additions and 27 deletions

changelog/23861.txt (new file)

@@ -0,0 +1,4 @@
```release-note:bug
api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured
on the request.
```
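For context on the bug: when a namespace is set on a `sys/seal-status` request, the call is handled through the logical system backend path (the last hunk below) rather than the raw HTTP handler, and per the new doc comments that path has already taken Core's read statelock in switchedLockHandleRequest. `GetSealStatus` then tried to take the same read lock a second time. On Go's `sync.RWMutex`, that nested read acquisition hangs as soon as a writer queues up in between, because a pending `Lock()` blocks new readers. Below is a minimal, self-contained sketch of that hazard, not Vault code; all names are illustrative:

```go
// Demonstrates why re-acquiring a read lock that the same request path
// already holds can deadlock: a writer queued in between blocks new readers.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var stateLock sync.RWMutex

	// The request-handling path already holds the read lock.
	stateLock.RLock()

	// A writer (think: a seal or unseal operation) queues up behind it.
	go func() {
		stateLock.Lock()
		stateLock.Unlock()
	}()
	time.Sleep(100 * time.Millisecond) // let the writer block in Lock()

	// The seal-status code then tries to take the read lock a second time.
	// Done in a goroutine here only so the demo can time out instead of hanging.
	acquired := make(chan struct{})
	go func() {
		stateLock.RLock() // blocks: new readers wait behind the pending writer
		defer stateLock.RUnlock()
		close(acquired)
	}()

	select {
	case <-acquired:
		fmt.Println("nested read lock acquired (no writer was waiting)")
	case <-time.After(time.Second):
		fmt.Println("nested read lock stuck behind a pending writer: deadlock")
	}

	stateLock.RUnlock()
}
```

The fix threads a `lock` flag through the seal-status helpers so that callers already inside the statelock skip the second acquisition.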


@@ -1492,7 +1492,7 @@ func (c *ServerCommand) Run(args []string) int {
 	// Vault cluster with multiple servers is configured with auto-unseal but is
 	// uninitialized. Once one server initializes the storage backend, this
 	// goroutine will pick up the unseal keys and unseal this instance.
-	if !core.IsInSealMigrationMode() {
+	if !core.IsInSealMigrationMode(true) {
 		go runUnseal(c, core, context.Background())
 	}


@@ -165,7 +165,7 @@ func handleSysSealStatus(core *vault.Core) http.Handler {
 func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
-	status, err := core.GetSealStatus(ctx)
+	status, err := core.GetSealStatus(ctx, true)
 	if err != nil {
 		respondError(w, http.StatusInternalServerError, err)
 		return


@@ -1466,10 +1466,14 @@ func (c *Core) Sealed() bool {
 	return atomic.LoadUint32(c.sealed) == 1
 }
-// SecretProgress returns the number of keys provided so far
-func (c *Core) SecretProgress() (int, string) {
-	c.stateLock.RLock()
-	defer c.stateLock.RUnlock()
+// SecretProgress returns the number of keys provided so far. Lock
+// should only be false if the caller is already holding the read
+// statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) SecretProgress(lock bool) (int, string) {
+	if lock {
+		c.stateLock.RLock()
+		defer c.stateLock.RUnlock()
+	}
 	switch c.unlockInfo {
 	case nil:
 		return 0, ""
@@ -3073,22 +3077,30 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey
 // configuration in storage is Shamir but the seal in HCL is not. In this
 // mode we should not auto-unseal (even if the migration is done) and we will
 // accept unseal requests with and without the `migrate` option, though the migrate
-// option is required if we haven't yet performed the seal migration.
-func (c *Core) IsInSealMigrationMode() bool {
-	c.stateLock.RLock()
-	defer c.stateLock.RUnlock()
+// option is required if we haven't yet performed the seal migration. Lock
+// should only be false if the caller is already holding the read
+// statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) IsInSealMigrationMode(lock bool) bool {
+	if lock {
+		c.stateLock.RLock()
+		defer c.stateLock.RUnlock()
+	}
 	return c.migrationInfo != nil
 }
 // IsSealMigrated returns true if we're in seal migration mode but migration
 // has already been performed (possibly by another node, or prior to this node's
-// current invocation.)
-func (c *Core) IsSealMigrated() bool {
-	if !c.IsInSealMigrationMode() {
+// current invocation). Lock should only be false if the caller is already
+// holding the read statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) IsSealMigrated(lock bool) bool {
+	if !c.IsInSealMigrationMode(lock) {
 		return false
 	}
-	c.stateLock.RLock()
-	defer c.stateLock.RUnlock()
+	if lock {
+		c.stateLock.RLock()
+		defer c.stateLock.RUnlock()
+	}
 	done, _ := c.sealMigrated(context.Background())
 	return done
 }
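Reduced to a sketch, the pattern applied to Core's helpers above is: add a `lock` parameter and only take the read statelock when the caller is not already inside it. The struct and field names below are illustrative, not Vault's:

```go
// Minimal sketch of the conditional-locking accessor pattern, assuming a
// core-like struct guarded by a single RWMutex.
package main

import (
	"fmt"
	"sync"
)

type core struct {
	stateLock sync.RWMutex
	progress  int
}

// secretProgress mirrors the shape of Core.SecretProgress: lock should only
// be false when the caller already holds stateLock for reading.
func (c *core) secretProgress(lock bool) int {
	if lock {
		c.stateLock.RLock()
		defer c.stateLock.RUnlock()
	}
	return c.progress
}

func main() {
	c := &core{progress: 2}

	// External caller (like the raw HTTP handler): take the lock.
	fmt.Println(c.secretProgress(true))

	// Caller already holding the read lock (like a request that came in
	// through switchedLockHandleRequest): skip re-acquisition.
	c.stateLock.RLock()
	fmt.Println(c.secretProgress(false))
	c.stateLock.RUnlock()
}
```

The trade-off, which the new doc comments call out, is that passing `false` is only correct when the caller genuinely holds the read statelock.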


@@ -413,7 +413,7 @@ func TestCore_Unseal_MultiShare(t *testing.T) {
 		t.Fatalf("should be sealed")
 	}
-	if prog, _ := c.SecretProgress(); prog != 0 {
+	if prog, _ := c.SecretProgress(true); prog != 0 {
 		t.Fatalf("bad progress: %d", prog)
 	}
@@ -432,14 +432,14 @@ func TestCore_Unseal_MultiShare(t *testing.T) {
 			if !unseal {
 				t.Fatalf("should be unsealed")
 			}
-			if prog, _ := c.SecretProgress(); prog != 0 {
+			if prog, _ := c.SecretProgress(true); prog != 0 {
 				t.Fatalf("bad progress: %d", prog)
 			}
 		} else {
 			if unseal {
 				t.Fatalf("should not be unsealed")
 			}
-			if prog, _ := c.SecretProgress(); prog != i+1 {
+			if prog, _ := c.SecretProgress(true); prog != i+1 {
 				t.Fatalf("bad progress: %d", prog)
 			}
 		}
@@ -592,7 +592,7 @@ func TestCore_Unseal_Single(t *testing.T) {
 		t.Fatalf("should be sealed")
 	}
-	if prog, _ := c.SecretProgress(); prog != 0 {
+	if prog, _ := c.SecretProgress(true); prog != 0 {
 		t.Fatalf("bad progress: %d", prog)
 	}
@@ -604,7 +604,7 @@ func TestCore_Unseal_Single(t *testing.T) {
 	if !unseal {
 		t.Fatalf("should be unsealed")
 	}
-	if prog, _ := c.SecretProgress(); prog != 0 {
+	if prog, _ := c.SecretProgress(true); prog != 0 {
 		t.Fatalf("bad progress: %d", prog)
 	}


@@ -33,7 +33,7 @@ func (c *NodeStatusReporter) GetNodeStatus(ctx context.Context) (retStatus nodes
 	var status nodestatus.NodeStatus
-	sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx)
+	sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx, true)
 	if err != nil {
 		return status, err
 	}


@@ -16,7 +16,7 @@ import (
 type WrappedCoreNodeStatus interface {
 	ActiveTime() time.Time
-	GetSealStatus(ctx context.Context) (*vault.SealStatusResponse, error)
+	GetSealStatus(ctx context.Context, lock bool) (*vault.SealStatusResponse, error)
 	IsRaftVoter() bool
 	ListenerAddresses() ([]string, error)
 	LogLevel() string


@@ -451,7 +451,7 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
 	}
 	// Disallow auto-unsealing when migrating
-	if c.IsInSealMigrationMode() && !c.IsSealMigrated() {
+	if c.IsInSealMigrationMode(true) && !c.IsSealMigrated(true) {
 		return NewNonFatalError(errors.New("cannot auto-unseal during seal migration"))
 	}


@@ -4710,7 +4710,7 @@ type SealStatusResponse struct {
 	Warnings []string `json:"warnings,omitempty"`
 }
-func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error) {
+func (core *Core) GetSealStatus(ctx context.Context, lock bool) (*SealStatusResponse, error) {
 	sealed := core.Sealed()
 	initialized, err := core.Initialized(ctx)
@@ -4763,7 +4763,7 @@ func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error
 		clusterID = cluster.ID
 	}
-	progress, nonce := core.SecretProgress()
+	progress, nonce := core.SecretProgress(lock)
 	s := &SealStatusResponse{
 		Type: sealConfig.Type,
@@ -4775,7 +4775,7 @@ func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error
 		Nonce: nonce,
 		Version: version.GetVersion().VersionNumber(),
 		BuildDate: version.BuildDate,
-		Migration: core.IsInSealMigrationMode() && !core.IsSealMigrated(),
+		Migration: core.IsInSealMigrationMode(lock) && !core.IsSealMigrated(lock),
 		ClusterName: clusterName,
 		ClusterID: clusterID,
 		RecoverySeal: core.SealAccess().RecoveryKeySupported(),
@@ -4837,7 +4837,7 @@ func (core *Core) GetLeaderStatus() (*LeaderResponse, error) {
 }
 func (b *SystemBackend) handleSealStatus(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-	status, err := b.Core.GetSealStatus(ctx)
+	status, err := b.Core.GetSealStatus(ctx, false)
 	if err != nil {
 		return nil, err
 	}
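Note the asymmetry between the two seal-status entry points: handleSysSealStatusRaw (the raw HTTP handler, earlier in the diff) passes `true` because it runs outside the statelock, while SystemBackend.handleSealStatus passes `false` because logical requests reach it via switchedLockHandleRequest, which per the new doc comments already holds the read statelock; re-acquiring it there is the path that could deadlock once a writer was queued.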