diff --git a/helper/keysutil/lock_manager.go b/helper/keysutil/lock_manager.go
index df8ec3339..6640d75ef 100644
--- a/helper/keysutil/lock_manager.go
+++ b/helper/keysutil/lock_manager.go
@@ -74,7 +74,9 @@ func (lm *LockManager) CacheActive() bool {
 }
 
 func (lm *LockManager) InvalidatePolicy(name string) {
-	lm.cache.Delete(name)
+	if lm.useCache {
+		lm.cache.Delete(name)
+	}
 }
 
 // RestorePolicy acquires an exclusive lock on the policy name and restores the
@@ -103,12 +105,17 @@ func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storag
 	lock.Lock()
 	defer lock.Unlock()
 
+	var ok bool
+	var pRaw interface{}
+
 	// If the policy is in cache and 'force' is not specified, error out. Anywhere
 	// that would put it in the cache will also be protected by the mutex above,
 	// so we don't need to re-check the cache later.
-	pRaw, ok := lm.cache.Load(name)
-	if ok && !force {
-		return fmt.Errorf("key %q already exists", name)
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(name)
+		if ok && !force {
+			return fmt.Errorf("key %q already exists", name)
+		}
 	}
 
 	// Conditionally look up the policy from storage, depending on the use of
@@ -168,7 +175,9 @@ func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storag
 	keyData.Policy.l = new(sync.RWMutex)
 
 	// Update the cache to contain the restored policy
-	lm.cache.Store(name, keyData.Policy)
+	if lm.useCache {
+		lm.cache.Store(name, keyData.Policy)
+	}
 
 	return nil
 }
@@ -183,7 +192,12 @@ func (lm *LockManager) BackupPolicy(ctx context.Context, storage logical.Storage
 	lock.Lock()
 	defer lock.Unlock()
 
-	pRaw, ok := lm.cache.Load(name)
+	var ok bool
+	var pRaw interface{}
+
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(name)
+	}
 	if ok {
 		p = pRaw.(*Policy)
 		p.l.Lock()
@@ -216,9 +230,13 @@ func (lm *LockManager) BackupPolicy(ctx context.Context, storage logical.Storage
 func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest) (retP *Policy, retUpserted bool, retErr error) {
 	var p *Policy
 	var err error
+	var ok bool
+	var pRaw interface{}
 
 	// Check if it's in our cache. If so, return right away.
-	pRaw, ok := lm.cache.Load(req.Name)
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(req.Name)
+	}
 	if ok {
 		p = pRaw.(*Policy)
 		if atomic.LoadUint32(&p.deleted) == 1 {
@@ -241,15 +259,18 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest) (retP *
 		// themselves
 		case lm.useCache:
 			lock.Unlock()
-			// If not using the cache, if we aren't returning a policy the caller
-			// doesn't have a lock, so we must unlock
+
+		// If not using the cache, if we aren't returning a policy the caller
+		// doesn't have a lock, so we must unlock
 		case retP == nil:
 			lock.Unlock()
 		}
 	}
 
 	// Check the cache again
-	pRaw, ok = lm.cache.Load(req.Name)
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(req.Name)
+	}
 	if ok {
 		p = pRaw.(*Policy)
 		if atomic.LoadUint32(&p.deleted) == 1 {
@@ -378,6 +399,8 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest) (retP *
 func (lm *LockManager) DeletePolicy(ctx context.Context, storage logical.Storage, name string) error {
 	var p *Policy
 	var err error
+	var ok bool
+	var pRaw interface{}
 
 	// We may be writing to disk, so grab an exclusive lock. This prevents bad
 	// behavior when the cache is turned off. We also lock the shared policy
@@ -386,7 +409,9 @@ func (lm *LockManager) DeletePolicy(ctx context.Context, storage logical.Storage
 	lock.Lock()
 	defer lock.Unlock()
 
-	pRaw, ok := lm.cache.Load(name)
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(name)
+	}
 	if ok {
 		p = pRaw.(*Policy)
 		p.l.Lock()
@@ -409,7 +434,9 @@ func (lm *LockManager) DeletePolicy(ctx context.Context, storage logical.Storage
 
 	atomic.StoreUint32(&p.deleted, 1)
 
-	lm.cache.Delete(name)
+	if lm.useCache {
+		lm.cache.Delete(name)
+	}
 
 	err = storage.Delete(ctx, "policy/"+name)
 	if err != nil {
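
The recurring change in every hunk above is the same pattern: each `lm.cache` (a `sync.Map`) access is wrapped in an `if lm.useCache` guard so that a disabled cache is never read from or written to, and `pRaw`/`ok` keep their zero values (`nil`/`false`) on the no-cache path, letting callers fall through to storage. Below is a minimal, self-contained Go sketch of that guard; the `lockManager` type and its `load`/`store`/`invalidate` helpers are hypothetical stand-ins for illustration, not Vault's actual API.

// Sketch of the useCache-guard pattern applied by the diff: a disabled
// cache is never touched, so no stale entries can accumulate in it.
package main

import (
	"fmt"
	"sync"
)

type lockManager struct {
	useCache bool
	cache    sync.Map // maps policy name -> policy value
}

// load returns (nil, false) when the cache is disabled, mirroring how the
// patched methods leave pRaw/ok at their zero values on the no-cache path.
func (lm *lockManager) load(name string) (interface{}, bool) {
	if lm.useCache {
		return lm.cache.Load(name)
	}
	return nil, false
}

// store is a no-op when the cache is disabled; without the guard, writes
// would still land in the map even though reads are meant to skip it.
func (lm *lockManager) store(name string, v interface{}) {
	if lm.useCache {
		lm.cache.Store(name, v)
	}
}

// invalidate mirrors the guarded InvalidatePolicy/DeletePolicy deletes.
func (lm *lockManager) invalidate(name string) {
	if lm.useCache {
		lm.cache.Delete(name)
	}
}

func main() {
	lm := &lockManager{useCache: false}
	lm.store("my-key", "policy-data")
	_, ok := lm.load("my-key")
	fmt.Println(ok) // false: with the cache disabled, nothing was cached
}

With the guard in place, a cache miss leaves `ok` false and the caller proceeds to storage, which is exactly what the `GetPolicy`, `BackupPolicy`, and `DeletePolicy` hunks rely on when `useCache` is off.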