open-vault/physical/cache.go

package physical

import (
"strings"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru"
"github.com/hashicorp/vault/helper/locksutil"
log "github.com/mgutz/logxi/v1"
)
const (
// DefaultCacheSize is used if no cache size is specified for NewCache
DefaultCacheSize = 32 * 1024
)
// Cache is used to wrap an underlying physical backend
// and provide an LRU cache layer on top. Most of the reads done by
// Vault are for policy objects so there is a large read reduction
// by using a simple write-through cache.
type Cache struct {
backend Backend
lru *lru.TwoQueueCache
locks []*locksutil.LockEntry
exceptions *iradix.Tree
logger log.Logger
}

// TransactionalCache is a Cache that wraps an underlying physical backend
// that is also Transactional
type TransactionalCache struct {
*Cache
Transactional
}
// NewCache returns a physical cache of the given size.
// If no size is provided, the default size is used.
func NewCache(b Backend, size int, coreExceptions []string, logger log.Logger) *Cache {
if logger.IsTrace() {
logger.Trace("physical/cache: creating LRU cache", "size", size)
}
if size <= 0 {
size = DefaultCacheSize
}
cacheExceptions := iradix.New()
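	// Build the radix tree of cache exceptions. Entries apply to keys under
	// core/ (which are otherwise never cached); a leading '!' negates an
	// entry, marking that prefix as explicitly not cacheable.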
for _, key := range coreExceptions {
cacheValue := true
if strings.HasPrefix(key, "!") {
key = strings.TrimPrefix(key, "!")
cacheValue = false
}
cacheExceptions, _, _ = cacheExceptions.Insert([]byte(key), cacheValue)
}
cache, _ := lru.New2Q(size)
c := &Cache{
backend: b,
lru: cache,
locks: locksutil.CreateLocks(),
exceptions: cacheExceptions,
logger: logger,
}
return c
}
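// NewTransactionalCache returns a TransactionalCache of the given size. The
// wrapped backend must also implement Transactional.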
func NewTransactionalCache(b Backend, size int, coreExceptions []string, logger log.Logger) *TransactionalCache {
c := &TransactionalCache{
Cache: NewCache(b, size, coreExceptions, logger),
Transactional: b.(Transactional),
}
return c
}
// Purge is used to clear the cache
func (c *Cache) Purge() {
// Lock the world
for _, lock := range c.locks {
lock.Lock()
defer lock.Unlock()
}
c.lru.Purge()
}
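// Put writes the entry to the underlying backend and, if the write succeeds
// and the key is cacheable, stores it in the cache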
func (c *Cache) Put(entry *Entry) error {
lock := locksutil.LockForKey(c.locks, entry.Key)
lock.Lock()
defer lock.Unlock()
err := c.backend.Put(entry)
if err == nil && c.shouldCache(entry.Key) {
c.lru.Add(entry.Key, entry)
}
return err
}
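// Get returns the entry for the given key, serving cacheable keys from the
// cache and falling back to the underlying backend on a miss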
func (c *Cache) Get(key string) (*Entry, error) {
lock := locksutil.LockForKey(c.locks, key)
lock.RLock()
defer lock.RUnlock()
	// We do NOT cache negative results for keys in the 'core/' prefix,
	// otherwise we risk certain race conditions upstream. The primary issue
	// is with HA mode: we could potentially negatively cache the leader entry
	// and cause leader discovery to fail.
if !c.shouldCache(key) {
return c.backend.Get(key)
}
// Check the LRU first
if raw, ok := c.lru.Get(key); ok {
if raw == nil {
return nil, nil
}
return raw.(*Entry), nil
}
// Read from the underlying backend
ent, err := c.backend.Get(key)
if err != nil {
return nil, err
}
// Cache the result
if ent != nil {
c.lru.Add(key, ent)
}
return ent, nil
}
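// Delete removes the key from the underlying backend and, if the delete
// succeeds and the key is cacheable, evicts it from the cache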
func (c *Cache) Delete(key string) error {
lock := locksutil.LockForKey(c.locks, key)
lock.Lock()
defer lock.Unlock()
err := c.backend.Delete(key)
if err == nil && c.shouldCache(key) {
c.lru.Remove(key)
}
return err
}
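// List returns the keys under the given prefix directly from the underlying
// backend; list results are never cached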
func (c *Cache) List(prefix string) ([]string, error) {
	// Always pass through to the backend, as list results would be difficult
	// to cache. For the same reason we don't lock: we can't reasonably know
	// ahead of time which locks to read-lock.
return c.backend.List(prefix)
}
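// Transaction passes the given operations to the underlying Transactional
// backend and, on success, mirrors the cacheable Put and Delete operations
// into the cache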
func (c *TransactionalCache) Transaction(txns []*TxnEntry) error {
// Collect keys that need to be locked
var keys []string
for _, curr := range txns {
keys = append(keys, curr.Entry.Key)
}
// Lock the keys
for _, l := range locksutil.LocksForKeys(c.locks, keys) {
l.Lock()
defer l.Unlock()
}
if err := c.Transactional.Transaction(txns); err != nil {
return err
}
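	// Mirror the committed operations into the cache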
for _, txn := range txns {
if c.shouldCache(txn.Entry.Key) {
switch txn.Operation {
case PutOperation:
c.lru.Add(txn.Entry.Key, txn.Entry)
case DeleteOperation:
c.lru.Remove(txn.Entry.Key)
}
}
}
return nil
}

// shouldCache checks the cache exceptions and reports whether results for
// the given key should be cached
func (c *Cache) shouldCache(key string) bool {
// prefix match if nested under core/
if strings.HasPrefix(key, "core/") {
if prefix, val, found := c.exceptions.Root().LongestPrefix([]byte(key)); found {
strPrefix := string(prefix)
if strings.HasSuffix(strPrefix, "/") || strPrefix == key {
return val.(bool)
}
}
// default for core/ values is false
return false
}
// default is true
return true
}