* api/seal-status: fix deadlock when namespace is set on seal-status calls
* changelog

Co-authored-by: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com>
parent 0ca886beaf
commit bc19a6d305
@@ -0,0 +1,4 @@
+```release-note:bug
+api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured
+on the request.
+```
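The release note describes the symptom; the hunks below show the mechanism. A seal-status request with a namespace set is routed through switchedLockHandleRequest, which already holds Core's read statelock, and GetSealStatus then tried to take the same sync.RWMutex read lock again. If a writer queues on the lock in between, that nested RLock blocks forever. A minimal, self-contained sketch of the RWMutex behavior involved (the names here are illustrative stand-ins, not Vault's code):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var stateLock sync.RWMutex
	done := make(chan struct{})

	// "Handler" goroutine: already holds the read lock for the request,
	// then a nested helper tries to take the read lock again.
	go func() {
		stateLock.RLock()
		defer stateLock.RUnlock()

		time.Sleep(100 * time.Millisecond) // give a writer time to queue up behind us

		// With a writer waiting on the RWMutex, this second RLock blocks forever.
		stateLock.RLock()
		stateLock.RUnlock()
		close(done)
	}()

	// "Writer" goroutine: queues for the write lock behind the held read lock.
	go func() {
		time.Sleep(50 * time.Millisecond)
		stateLock.Lock()
		stateLock.Unlock()
	}()

	select {
	case <-done:
		fmt.Println("no deadlock")
	case <-time.After(1 * time.Second):
		fmt.Println("deadlocked: nested RLock blocked behind a pending writer")
	}
}
```

The fix threads a `lock bool` through the seal-status helpers so callers that already hold the statelock can skip the second RLock.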
@@ -1492,7 +1492,7 @@ func (c *ServerCommand) Run(args []string) int {
 	// Vault cluster with multiple servers is configured with auto-unseal but is
 	// uninitialized. Once one server initializes the storage backend, this
 	// goroutine will pick up the unseal keys and unseal this instance.
-	if !core.IsInSealMigrationMode() {
+	if !core.IsInSealMigrationMode(true) {
 		go runUnseal(c, core, context.Background())
 	}

@@ -165,7 +165,7 @@ func handleSysSealStatus(core *vault.Core) http.Handler {

 func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
-	status, err := core.GetSealStatus(ctx)
+	status, err := core.GetSealStatus(ctx, true)
 	if err != nil {
 		respondError(w, http.StatusInternalServerError, err)
 		return
@@ -1466,10 +1466,14 @@ func (c *Core) Sealed() bool {
 	return atomic.LoadUint32(c.sealed) == 1
 }

-// SecretProgress returns the number of keys provided so far
-func (c *Core) SecretProgress() (int, string) {
-	c.stateLock.RLock()
-	defer c.stateLock.RUnlock()
+// SecretProgress returns the number of keys provided so far. Lock
+// should only be false if the caller is already holding the read
+// statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) SecretProgress(lock bool) (int, string) {
+	if lock {
+		c.stateLock.RLock()
+		defer c.stateLock.RUnlock()
+	}
 	switch c.unlockInfo {
 	case nil:
 		return 0, ""
@@ -3073,22 +3077,30 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey
 // configuration in storage is Shamir but the seal in HCL is not. In this
 // mode we should not auto-unseal (even if the migration is done) and we will
 // accept unseal requests with and without the `migrate` option, though the migrate
-// option is required if we haven't yet performed the seal migration.
-func (c *Core) IsInSealMigrationMode() bool {
-	c.stateLock.RLock()
-	defer c.stateLock.RUnlock()
+// option is required if we haven't yet performed the seal migration. Lock
+// should only be false if the caller is already holding the read
+// statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) IsInSealMigrationMode(lock bool) bool {
+	if lock {
+		c.stateLock.RLock()
+		defer c.stateLock.RUnlock()
+	}
 	return c.migrationInfo != nil
 }

 // IsSealMigrated returns true if we're in seal migration mode but migration
 // has already been performed (possibly by another node, or prior to this node's
-// current invocation.)
-func (c *Core) IsSealMigrated() bool {
-	if !c.IsInSealMigrationMode() {
+// current invocation). Lock should only be false if the caller is already
+// holding the read statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) IsSealMigrated(lock bool) bool {
+	if !c.IsInSealMigrationMode(lock) {
 		return false
 	}
-	c.stateLock.RLock()
-	defer c.stateLock.RUnlock()
+
+	if lock {
+		c.stateLock.RLock()
+		defer c.stateLock.RUnlock()
+	}
 	done, _ := c.sealMigrated(context.Background())
 	return done
 }
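The same lock-optional shape is applied above to SecretProgress, IsInSealMigrationMode, and IsSealMigrated, with the contract spelled out in the new doc comments: pass false only when the caller already holds the read statelock. A runnable sketch of the pattern on a stand-in type (not Vault's Core):

```go
package main

import (
	"fmt"
	"sync"
)

// fakeCore is a stand-in for illustration only.
type fakeCore struct {
	stateLock  sync.RWMutex
	unlockInfo *struct{ progress int }
}

// secretProgress mirrors the shape of the patched SecretProgress: callers
// pass lock=false only when they already hold stateLock for reading.
func (c *fakeCore) secretProgress(lock bool) int {
	if lock {
		c.stateLock.RLock()
		defer c.stateLock.RUnlock()
	}
	if c.unlockInfo == nil {
		return 0
	}
	return c.unlockInfo.progress
}

func main() {
	c := &fakeCore{}

	// External caller: not holding the lock, so let the method take it.
	fmt.Println(c.secretProgress(true))

	// Caller already under the read lock (like a request that came through
	// switchedLockHandleRequest): pass false so the lock is not re-acquired.
	c.stateLock.RLock()
	fmt.Println(c.secretProgress(false))
	c.stateLock.RUnlock()
}
```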
@@ -413,7 +413,7 @@ func TestCore_Unseal_MultiShare(t *testing.T) {
 		t.Fatalf("should be sealed")
 	}

-	if prog, _ := c.SecretProgress(); prog != 0 {
+	if prog, _ := c.SecretProgress(true); prog != 0 {
 		t.Fatalf("bad progress: %d", prog)
 	}

@@ -432,14 +432,14 @@ func TestCore_Unseal_MultiShare(t *testing.T) {
 			if !unseal {
 				t.Fatalf("should be unsealed")
 			}
-			if prog, _ := c.SecretProgress(); prog != 0 {
+			if prog, _ := c.SecretProgress(true); prog != 0 {
 				t.Fatalf("bad progress: %d", prog)
 			}
 		} else {
 			if unseal {
 				t.Fatalf("should not be unsealed")
 			}
-			if prog, _ := c.SecretProgress(); prog != i+1 {
+			if prog, _ := c.SecretProgress(true); prog != i+1 {
 				t.Fatalf("bad progress: %d", prog)
 			}
 		}
@@ -592,7 +592,7 @@ func TestCore_Unseal_Single(t *testing.T) {
 		t.Fatalf("should be sealed")
 	}

-	if prog, _ := c.SecretProgress(); prog != 0 {
+	if prog, _ := c.SecretProgress(true); prog != 0 {
 		t.Fatalf("bad progress: %d", prog)
 	}

@@ -604,7 +604,7 @@ func TestCore_Unseal_Single(t *testing.T) {
 	if !unseal {
 		t.Fatalf("should be unsealed")
 	}
-	if prog, _ := c.SecretProgress(); prog != 0 {
+	if prog, _ := c.SecretProgress(true); prog != 0 {
 		t.Fatalf("bad progress: %d", prog)
 	}

@@ -33,7 +33,7 @@ func (c *NodeStatusReporter) GetNodeStatus(ctx context.Context) (retStatus nodes

 	var status nodestatus.NodeStatus

-	sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx)
+	sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx, true)
 	if err != nil {
 		return status, err
 	}
@@ -16,7 +16,7 @@ import (

 type WrappedCoreNodeStatus interface {
 	ActiveTime() time.Time
-	GetSealStatus(ctx context.Context) (*vault.SealStatusResponse, error)
+	GetSealStatus(ctx context.Context, lock bool) (*vault.SealStatusResponse, error)
 	IsRaftVoter() bool
 	ListenerAddresses() ([]string, error)
 	LogLevel() string
@@ -451,7 +451,7 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
 	}

 	// Disallow auto-unsealing when migrating
-	if c.IsInSealMigrationMode() && !c.IsSealMigrated() {
+	if c.IsInSealMigrationMode(true) && !c.IsSealMigrated(true) {
 		return NewNonFatalError(errors.New("cannot auto-unseal during seal migration"))
 	}

@@ -4710,7 +4710,7 @@ type SealStatusResponse struct {
 	Warnings []string `json:"warnings,omitempty"`
 }

-func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error) {
+func (core *Core) GetSealStatus(ctx context.Context, lock bool) (*SealStatusResponse, error) {
 	sealed := core.Sealed()

 	initialized, err := core.Initialized(ctx)
@@ -4763,7 +4763,7 @@ func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error) {
 		clusterID = cluster.ID
 	}

-	progress, nonce := core.SecretProgress()
+	progress, nonce := core.SecretProgress(lock)

 	s := &SealStatusResponse{
 		Type: sealConfig.Type,
@@ -4775,7 +4775,7 @@ func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error) {
 		Nonce:        nonce,
 		Version:      version.GetVersion().VersionNumber(),
 		BuildDate:    version.BuildDate,
-		Migration:    core.IsInSealMigrationMode() && !core.IsSealMigrated(),
+		Migration:    core.IsInSealMigrationMode(lock) && !core.IsSealMigrated(lock),
 		ClusterName:  clusterName,
 		ClusterID:    clusterID,
 		RecoverySeal: core.SealAccess().RecoveryKeySupported(),
@@ -4837,7 +4837,7 @@ func (core *Core) GetLeaderStatus() (*LeaderResponse, error) {
 }

 func (b *SystemBackend) handleSealStatus(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-	status, err := b.Core.GetSealStatus(ctx)
+	status, err := b.Core.GetSealStatus(ctx, false)
 	if err != nil {
 		return nil, err
 	}
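Taken together, the call sites choose the lock argument by where they sit relative to the statelock: the raw HTTP handler (handleSysSealStatusRaw), the server auto-unseal goroutine, UnsealWithStoredKeys, and the node-status reporter pass true because they run outside the statelock, while SystemBackend.handleSealStatus passes false, since requests reaching it through switchedLockHandleRequest already hold the read statelock, which is the deadlock the release note calls out.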