package keysutil

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/vault/helper/jsonutil"
	"github.com/hashicorp/vault/helper/locksutil"
	"github.com/hashicorp/vault/logical"
)

const (
	shared                   = false
	exclusive                = true
	currentConvergentVersion = 3
)

var (
	errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation")
)

// PolicyRequest holds values used when requesting a policy. Most values are
// only used during an upsert.
type PolicyRequest struct {
	// The storage to use
	Storage logical.Storage

	// The name of the policy
	Name string

	// The key type
	KeyType KeyType

	// Whether it should be derived
	Derived bool

	// Whether to enable convergent encryption
	Convergent bool

	// Whether to allow export
	Exportable bool

	// Whether to upsert
	Upsert bool

	// Whether to allow plaintext backup
	AllowPlaintextBackup bool
}
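
// LockManager coordinates access to named key policies. It holds an optional
// in-memory policy cache and a fixed set of per-name locks so that concurrent
// operations on the same key serialize around storage reads and writes.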
type LockManager struct {
	useCache bool
	// If caching is enabled, the map of name to in-memory policy cache
	cache sync.Map

	keyLocks []*locksutil.LockEntry
}
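
// NewLockManager returns a LockManager with a freshly created set of key
// locks, using the in-memory policy cache unless cacheDisabled is true.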
func NewLockManager(cacheDisabled bool) *LockManager {
	lm := &LockManager{
		useCache: !cacheDisabled,
		keyLocks: locksutil.CreateLocks(),
	}
	return lm
}
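
// CacheActive reports whether the in-memory policy cache is in use.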
func (lm *LockManager) CacheActive() bool {
	return lm.useCache
}
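
// InvalidatePolicy evicts the named policy from the in-memory cache, if present.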
func (lm *LockManager) InvalidatePolicy(name string) {
	lm.cache.Delete(name)
}

// RestorePolicy acquires an exclusive lock on the policy name and restores the
// given policy along with the archive.
func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storage, name, backup string, force bool) error {
	backupBytes, err := base64.StdEncoding.DecodeString(backup)
	if err != nil {
		return err
	}

	var keyData KeyData
	err = jsonutil.DecodeJSON(backupBytes, &keyData)
	if err != nil {
		return err
	}

	// Set a different name if desired
	if name != "" {
		keyData.Policy.Name = name
	}

	name = keyData.Policy.Name

	// Grab the exclusive lock as we'll be modifying disk
	lock := locksutil.LockForKey(lm.keyLocks, name)
	lock.Lock()
	defer lock.Unlock()

	// If the policy is in cache and 'force' is not specified, error out. Anywhere
	// that would put it in the cache will also be protected by the mutex above,
	// so we don't need to re-check the cache later.
	pRaw, ok := lm.cache.Load(name)
	if ok && !force {
		return fmt.Errorf("key %q already exists", name)
	}

	// Conditionally look up the policy from storage, depending on the use of
	// 'force' and whether the policy was found in cache.
	//
	// - If it was not found in cache and we are not using 'force', look for it
	// in storage. If found, error out.
	//
	// - If it was found in cache and we are using 'force', pRaw will not be nil
	// and we do not look the policy up from storage.
	//
	// - If it was found in cache and we are not using 'force', we would have
	// returned above with an error.
	var p *Policy
	if pRaw == nil {
		p, err = lm.getPolicyFromStorage(ctx, storage, name)
		if err != nil {
			return err
		}
		if p != nil && !force {
			return fmt.Errorf("key %q already exists", name)
		}
	}

	// If both pRaw and p above are nil and 'force' is specified, we don't need
	// to grab policy locks as we have ensured it doesn't already exist, so
	// there will be no races as nothing else has this pointer. If 'force' was
	// not used, an error would have been returned by now if the policy already
	// existed.
	if pRaw != nil {
		p = pRaw.(*Policy)
	}
	if p != nil {
		p.l.Lock()
		defer p.l.Unlock()
	}

	// Restore the archived keys
	if keyData.ArchivedKeys != nil {
		err = keyData.Policy.storeArchive(ctx, storage, keyData.ArchivedKeys)
		if err != nil {
			return errwrap.Wrapf(fmt.Sprintf("failed to restore archived keys for key %q: {{err}}", name), err)
		}
	}

	// Mark the policy as a restored key
	keyData.Policy.RestoreInfo = &RestoreInfo{
		Time:    time.Now(),
		Version: keyData.Policy.LatestVersion,
	}

	// Restore the policy. This will also attempt to adjust the archive.
	err = keyData.Policy.Persist(ctx, storage)
	if err != nil {
		return errwrap.Wrapf(fmt.Sprintf("failed to restore the policy %q: {{err}}", name), err)
	}

	keyData.Policy.l = new(sync.RWMutex)

	// Update the cache to contain the restored policy
	lm.cache.Store(name, keyData.Policy)

	return nil
}
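
// BackupPolicy returns a backup of the named policy in the format consumed by
// RestorePolicy, or an error if the policy does not exist or has been deleted.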
func (lm *LockManager) BackupPolicy(ctx context.Context, storage logical.Storage, name string) (string, error) {
	var p *Policy
	var err error

	// Backup writes information about when the backup took place, so we get an
	// exclusive lock here
	lock := locksutil.LockForKey(lm.keyLocks, name)
	lock.Lock()
	defer lock.Unlock()

	pRaw, ok := lm.cache.Load(name)
	if ok {
		p = pRaw.(*Policy)
		p.l.Lock()
		defer p.l.Unlock()
	} else {
		// If the policy doesn't exist in storage, error out
		p, err = lm.getPolicyFromStorage(ctx, storage, name)
		if err != nil {
			return "", err
		}
		if p == nil {
			return "", fmt.Errorf("key %q not found", name)
		}
	}

	if atomic.LoadUint32(&p.deleted) == 1 {
		return "", fmt.Errorf("key %q not found", name)
	}

	backup, err := p.Backup(ctx, storage)
	if err != nil {
		return "", err
	}

	return backup, nil
}

// GetPolicy returns the policy for the given request, upserting a new policy
// if req.Upsert is set and no policy currently exists. When the function
// returns, if caching was disabled, the Policy's lock must be unlocked when
// the caller is done (and it should not be re-locked).
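//
// A minimal caller sketch (illustrative only; it assumes the Policy type
// exposes an Unlock helper and that ctx, storage, and lm are in scope):
//
//	p, upserted, err := lm.GetPolicy(ctx, PolicyRequest{
//		Storage: storage,
//		Name:    "my-key",
//		KeyType: KeyType_AES256_GCM96,
//		Upsert:  true,
//	})
//	if err != nil {
//		return err
//	}
//	if p == nil {
//		return fmt.Errorf("no policy returned")
//	}
//	if !lm.CacheActive() {
//		// GetPolicy returned with the policy's lock held
//		defer p.Unlock()
//	}
//	_ = upserted // true if the policy was created by this call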
func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest) (retP *Policy, retUpserted bool, retErr error) {
	var p *Policy
	var err error

	// Check if it's in our cache. If so, return right away.
	pRaw, ok := lm.cache.Load(req.Name)
	if ok {
		p = pRaw.(*Policy)
		if atomic.LoadUint32(&p.deleted) == 1 {
			return nil, false, nil
		}
		return p, false, nil
	}

	// We're not using the cache, or it wasn't found; get an exclusive lock.
	// This ensures that any other process writing the actual storage will be
	// finished before we load from storage.
	lock := locksutil.LockForKey(lm.keyLocks, req.Name)
	lock.Lock()

	// If we are using the cache, defer the lock unlock; otherwise we will
	// return from here with the lock still held.
	cleanup := func() {
		switch {
		// If using the cache we always unlock; the caller locks the policy
		// themselves
		case lm.useCache:
			lock.Unlock()

		// If not using the cache and we aren't returning a policy, the caller
		// doesn't hold a lock, so we must unlock
		case retP == nil:
			lock.Unlock()
		}
	}

	// Check the cache again
	pRaw, ok = lm.cache.Load(req.Name)
	if ok {
		p = pRaw.(*Policy)
		if atomic.LoadUint32(&p.deleted) == 1 {
			cleanup()
			return nil, false, nil
		}
		retP = p
		cleanup()
		return
	}

	// Load it from storage
	p, err = lm.getPolicyFromStorage(ctx, req.Storage, req.Name)
	if err != nil {
		cleanup()
		return nil, false, err
	}
	// We don't need to lock the policy as there would be no other holders of
	// the pointer

	if p == nil {
		// This is the only place we upsert a new policy, so if upsert is not
		// specified, unlock (via cleanup) and return
		if !req.Upsert {
			cleanup()
			return nil, false, nil
		}

		// We create the policy here and only add it to the cache at the end,
		// while still holding the exclusive per-name lock, so a concurrent
		// request for the same name cannot create a policy with different
		// parameters.

		switch req.KeyType {
		case KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305:
			if req.Convergent && !req.Derived {
				cleanup()
				return nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled")
			}

		case KeyType_ECDSA_P256:
			if req.Derived || req.Convergent {
				cleanup()
				return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType)
			}

		case KeyType_ED25519:
			if req.Convergent {
				cleanup()
				return nil, false, fmt.Errorf("convergent encryption not supported for keys of type %v", req.KeyType)
			}

		case KeyType_RSA2048, KeyType_RSA4096:
			if req.Derived || req.Convergent {
				cleanup()
				return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType)
			}

		default:
			cleanup()
			return nil, false, fmt.Errorf("unsupported key type %v", req.KeyType)
		}

		p = &Policy{
			l:                    new(sync.RWMutex),
			Name:                 req.Name,
			Type:                 req.KeyType,
			Derived:              req.Derived,
			Exportable:           req.Exportable,
			AllowPlaintextBackup: req.AllowPlaintextBackup,
		}

		if req.Derived {
			p.KDF = Kdf_hkdf_sha256
			if req.Convergent {
				p.ConvergentEncryption = true
				// As of version 3 we store the version within each key, so we
				// set to -1 to indicate that the value in the policy has no
				// meaning. We still, for backwards compatibility, fall back to
				// this value if the key doesn't have one, which means it will
				// only be -1 in the case where every key version is >= 3
				p.ConvergentVersion = -1
			}
		}

		// Performs the actual persist and does setup
		err = p.Rotate(ctx, req.Storage)
		if err != nil {
			cleanup()
			return nil, false, err
		}

		if lm.useCache {
			lm.cache.Store(req.Name, p)
		} else {
			p.l = &lock.RWMutex
			p.writeLocked = true
		}

		// We don't need to worry about upgrading since it will be a new policy
		retP = p
		retUpserted = true
		cleanup()
		return
	}

	if p.NeedsUpgrade() {
		if err := p.Upgrade(ctx, req.Storage); err != nil {
			cleanup()
			return nil, false, err
		}
	}

	if lm.useCache {
		lm.cache.Store(req.Name, p)
	} else {
		p.l = &lock.RWMutex
		p.writeLocked = true
	}

	retP = p
	cleanup()
	return
}
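
// DeletePolicy removes the named policy and its key archive from storage and
// drops it from the cache, provided deletion is allowed for the key.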
func (lm *LockManager) DeletePolicy(ctx context.Context, storage logical.Storage, name string) error {
	var p *Policy
	var err error

	// We may be writing to disk, so grab an exclusive lock. This prevents bad
	// behavior when the cache is turned off. We also lock the shared policy
	// object to make sure no requests are in flight.
	lock := locksutil.LockForKey(lm.keyLocks, name)
	lock.Lock()
	defer lock.Unlock()

	pRaw, ok := lm.cache.Load(name)
	if ok {
		p = pRaw.(*Policy)
		p.l.Lock()
		defer p.l.Unlock()
	}

	if p == nil {
		p, err = lm.getPolicyFromStorage(ctx, storage, name)
		if err != nil {
			return err
		}
		if p == nil {
			return fmt.Errorf("could not delete key; not found")
		}
	}

	if !p.DeletionAllowed {
		return fmt.Errorf("deletion is not allowed for this key")
	}

	atomic.StoreUint32(&p.deleted, 1)

	lm.cache.Delete(name)

	err = storage.Delete(ctx, "policy/"+name)
	if err != nil {
		return errwrap.Wrapf(fmt.Sprintf("error deleting key %q: {{err}}", name), err)
	}

	err = storage.Delete(ctx, "archive/"+name)
	if err != nil {
		return errwrap.Wrapf(fmt.Sprintf("error deleting key %q archive: {{err}}", name), err)
	}

	return nil
}
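
// getPolicyFromStorage loads the named policy directly from storage, bypassing
// the cache.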
func (lm *LockManager) getPolicyFromStorage(ctx context.Context, storage logical.Storage, name string) (*Policy, error) {
	return LoadPolicy(ctx, storage, "policy/"+name)
}