package physical

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
	"sync"

	"github.com/hashicorp/golang-lru"
	"github.com/hashicorp/vault/helper/locksutil"
	"github.com/hashicorp/vault/helper/strutil"
	log "github.com/mgutz/logxi/v1"
)

const (
	// DefaultCacheSize is used if no cache size is specified for NewCache
	DefaultCacheSize = 32 * 1024
)

// Cache is used to wrap an underlying physical backend
// and provide an LRU cache layer on top. Most of the reads done by
// Vault are for policy objects so there is a large read reduction
// by using a simple write-through cache.
type Cache struct {
	backend       Backend
	transactional Transactional
	lru           *lru.TwoQueueCache
	locks         map[string]*sync.RWMutex
	logger        log.Logger
}

// NewCache returns a physical cache of the given size.
// If no size is provided, the default size is used.
func NewCache(b Backend, size int, logger log.Logger) *Cache {
	if size <= 0 {
		size = DefaultCacheSize
	}

	if logger.IsTrace() {
		logger.Trace("physical/cache: creating LRU cache", "size", size)
	}

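	// The error from New2Q is ignored: it is only returned for a non-positive
	// size, which was handled above.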
	cache, _ := lru.New2Q(size)
	c := &Cache{
		backend: b,
		lru:     cache,
		locks:   make(map[string]*sync.RWMutex, 256),
		logger:  logger,
	}

	if err := locksutil.CreateLocks(c.locks, 256); err != nil {
		logger.Error("physical/cache: error creating locks", "error", err)
		return nil
	}

	if txnl, ok := c.backend.(Transactional); ok {
		c.transactional = txnl
	}

	return c
}

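// lockHashForKey maps a key to one of the cache's lock buckets by taking the
// first two hex characters of the key's MD5 hash.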
func (c *Cache) lockHashForKey(key string) string {
	hf := md5.New()
	hf.Write([]byte(key))
	return strings.ToLower(hex.EncodeToString(hf.Sum(nil))[:2])
}

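// lockForKey returns the lock guarding the bucket that the given key hashes into.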
func (c *Cache) lockForKey(key string) *sync.RWMutex {
	return c.locks[c.lockHashForKey(key)]
}

// Purge is used to clear the cache
func (c *Cache) Purge() {
	// Lock the world
	lockHashes := make([]string, 0, len(c.locks))
	for hash := range c.locks {
		lockHashes = append(lockHashes, hash)
	}

	// Sort and deduplicate. This ensures we don't try to grab the same lock
	// twice, and enforcing a sort means we'll not have multiple goroutines
	// deadlock by acquiring in different orders.
	lockHashes = strutil.RemoveDuplicates(lockHashes)

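	// The deferred unlocks release only when Purge returns, so every bucket
	// stays locked while the cache is cleared.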
	for _, lockHash := range lockHashes {
		lock := c.locks[lockHash]
		lock.Lock()
		defer lock.Unlock()
	}

	c.lru.Purge()
}

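// Put writes the entry through to the underlying backend and, on success,
// caches it. Entries under the "core/" prefix are never cached.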
func (c *Cache) Put(entry *Entry) error {
	lock := c.lockForKey(entry.Key)
	lock.Lock()
	defer lock.Unlock()

	err := c.backend.Put(entry)
	if err == nil && !strings.HasPrefix(entry.Key, "core/") {
		c.lru.Add(entry.Key, entry)
	}
	return err
}

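// Get returns the entry for the given key, serving it from the LRU cache when
// possible and falling back to the underlying backend.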
func (c *Cache) Get(key string) (*Entry, error) {
	lock := c.lockForKey(key)
	lock.RLock()
	defer lock.RUnlock()

	// We do NOT cache negative results for keys in the 'core/' prefix;
	// otherwise we risk certain race conditions upstream. The primary issue is
	// with HA mode: we could negatively cache the leader entry and cause
	// leader discovery to fail.
	if strings.HasPrefix(key, "core/") {
		return c.backend.Get(key)
	}

	// Check the LRU first
	if raw, ok := c.lru.Get(key); ok {
		if raw == nil {
			return nil, nil
		}
		return raw.(*Entry), nil
	}

	// Read from the underlying backend
	ent, err := c.backend.Get(key)
	if err != nil {
		return nil, err
	}

	// Cache the result
	if ent != nil {
		c.lru.Add(key, ent)
	}

	return ent, nil
}

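// Delete removes the key from the underlying backend and, on success, evicts
// it from the cache.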
func (c *Cache) Delete(key string) error {
	lock := c.lockForKey(key)
	lock.Lock()
	defer lock.Unlock()

	err := c.backend.Delete(key)
	if err == nil && !strings.HasPrefix(key, "core/") {
		c.lru.Remove(key)
	}
	return err
}

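// List returns the keys under the given prefix, always passing through to the
// underlying backend.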
func (c *Cache) List(prefix string) ([]string, error) {
	// Always pass-through as this would be difficult to cache. For the same
	// reason we don't lock, as we can't reasonably know which locks to
	// readlock ahead of time.
	return c.backend.List(prefix)
}

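// Transaction passes the transaction through to the underlying backend, if it
// supports transactions, and then applies the committed operations to the cache.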
func (c *Cache) Transaction(txns []TxnEntry) error {
	if c.transactional == nil {
		return fmt.Errorf("physical/cache: underlying backend does not support transactions")
	}

	var lockHashes []string
	for _, txn := range txns {
		lockHashes = append(lockHashes, c.lockHashForKey(txn.Entry.Key))
	}

	// Sort and deduplicate. This ensures we don't try to grab the same lock
	// twice, and enforcing a sort means we'll not have multiple goroutines
	// deadlock by acquiring in different orders.
	lockHashes = strutil.RemoveDuplicates(lockHashes)

	for _, lockHash := range lockHashes {
		lock := c.locks[lockHash]
		lock.Lock()
		defer lock.Unlock()
	}

	if err := c.transactional.Transaction(txns); err != nil {
		return err
	}

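	// The transaction committed, so update the cache to reflect each operation.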
	for _, txn := range txns {
		switch txn.Operation {
		case PutOperation:
			c.lru.Add(txn.Entry.Key, txn.Entry)
		case DeleteOperation:
			c.lru.Remove(txn.Entry.Key)
		}
	}

	return nil
}