package storagepacker

import (
	"context"
	"crypto/md5"
	"fmt"
	"strconv"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-hclog"
	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/helper/compressutil"
	"github.com/hashicorp/vault/sdk/helper/locksutil"
	"github.com/hashicorp/vault/sdk/logical"
)

const (
	bucketCount = 256
	// StoragePackerBucketsPrefix is the default storage key prefix under which
	// bucket data will be stored.
	StoragePackerBucketsPrefix = "packer/buckets/"
)

// StoragePacker packs items into a fixed number of buckets by hashing each
// item's identifier and indexing on the result. Currently only 256 buckets
// are supported, hence it relies on the first byte of the hash value for
// indexing.
type StoragePacker struct {
	view         logical.Storage
	logger       log.Logger
	storageLocks []*locksutil.LockEntry
	viewPrefix   string
}

// View returns the storage view configured to be used by the packer
func (s *StoragePacker) View() logical.Storage {
	return s.view
}

// GetBucket returns a bucket for a given key
func (s *StoragePacker) GetBucket(key string) (*Bucket, error) {
	if key == "" {
		return nil, fmt.Errorf("missing bucket key")
	}

	lock := locksutil.LockForKey(s.storageLocks, key)
	lock.RLock()
	defer lock.RUnlock()

	// Read from storage
	storageEntry, err := s.view.Get(context.Background(), key)
	if err != nil {
		return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err)
	}
	if storageEntry == nil {
		return nil, nil
	}

	uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value)
	if err != nil {
		return nil, errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err)
	}
	if notCompressed {
		uncompressedData = storageEntry.Value
	}

	var bucket Bucket
	err = proto.Unmarshal(uncompressedData, &bucket)
	if err != nil {
		return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err)
	}

	return &bucket, nil
}
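
// A brief usage sketch (itemID is illustrative): callers normally derive the
// bucket key from an item ID first, as GetItem does further below:
//
//	bucket, err := s.GetBucket(s.BucketKey(itemID))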

// upsert either inserts a new item into the bucket or updates an existing one
// if an item with a matching key is already present.
func (s *Bucket) upsert(item *Item) error {
	if s == nil {
		return fmt.Errorf("nil storage bucket")
	}

	if item == nil {
		return fmt.Errorf("nil item")
	}

	if item.ID == "" {
		return fmt.Errorf("missing item ID")
	}

	// Look for an item with a matching key, without modifying the collection
	// while iterating
	foundIdx := -1
	for itemIdx, bucketItem := range s.Items {
		if bucketItem.ID == item.ID {
			foundIdx = itemIdx
			break
		}
	}

	// If there is no match, append the item, otherwise update it
	if foundIdx == -1 {
		s.Items = append(s.Items, item)
	} else {
		s.Items[foundIdx] = item
	}

	return nil
}

// BucketKey returns the storage key of the bucket where the given item will be
// stored.
func (s *StoragePacker) BucketKey(itemID string) string {
	hf := md5.New()
	input := []byte(itemID)
	n, err := hf.Write(input)
	// Make linter happy
	if err != nil || n != len(input) {
		return ""
	}
	index := uint8(hf.Sum(nil)[0])
	return s.viewPrefix + strconv.Itoa(int(index))
}
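
// A worked example of the mapping (illustrative, not part of the API): the
// MD5 digest of "foo" is acbd18db4cc2f85cedef654fccc4a4d8, so its first byte
// is 0xac (172) and, with the default prefix, the item lands in the bucket
// keyed "packer/buckets/172":
//
//	key := s.BucketKey("foo") // "packer/buckets/172"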

// DeleteItem removes the item from the respective bucket
func (s *StoragePacker) DeleteItem(_ context.Context, itemID string) error {
	return s.DeleteMultipleItems(context.Background(), nil, itemID)
}

// DeleteMultipleItems removes the given items from their respective buckets,
// locking each affected bucket and persisting every modified bucket once all
// deletions have been applied.
func (s *StoragePacker) DeleteMultipleItems(ctx context.Context, logger hclog.Logger, itemIDs ...string) error {
	var err error
	switch len(itemIDs) {
	case 0:
		// Nothing to delete
		return nil

	case 1:
		logger = hclog.NewNullLogger()
		fallthrough

	default:
		lockIndexes := make(map[string]struct{}, len(s.storageLocks))
		for _, itemID := range itemIDs {
			bucketKey := s.BucketKey(itemID)
			if _, ok := lockIndexes[bucketKey]; !ok {
				lockIndexes[bucketKey] = struct{}{}
			}
		}

		lockKeys := make([]string, 0, len(lockIndexes))
		for k := range lockIndexes {
			lockKeys = append(lockKeys, k)
		}

		locks := locksutil.LocksForKeys(s.storageLocks, lockKeys)
		for _, lock := range locks {
			lock.Lock()
			defer lock.Unlock()
		}
	}

	if logger == nil {
		logger = hclog.NewNullLogger()
	}

	bucketCache := make(map[string]*Bucket, len(s.storageLocks))

	logger.Debug("deleting multiple items from storagepacker; caching and deleting from buckets", "total_items", len(itemIDs))

	var pctDone int
	for idx, itemID := range itemIDs {
		bucketKey := s.BucketKey(itemID)

		bucket, bucketFound := bucketCache[bucketKey]
		if !bucketFound {
			// Read from storage
			storageEntry, err := s.view.Get(context.Background(), bucketKey)
			if err != nil {
				return errwrap.Wrapf("failed to read packed storage value: {{err}}", err)
			}
			if storageEntry == nil {
				return nil
			}

			uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value)
			if err != nil {
				return errwrap.Wrapf("failed to decompress packed storage value: {{err}}", err)
			}
			if notCompressed {
				uncompressedData = storageEntry.Value
			}

			bucket = new(Bucket)
			err = proto.Unmarshal(uncompressedData, bucket)
			if err != nil {
				return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err)
			}
		}

		// Look for a matching storage entry
		foundIdx := -1
		for itemIdx, item := range bucket.Items {
			if item.ID == itemID {
				foundIdx = itemIdx
				break
			}
		}

		// If there is a match, remove it from the collection and persist the
		// resulting collection
		if foundIdx != -1 {
			bucket.Items[foundIdx] = bucket.Items[len(bucket.Items)-1]
			bucket.Items = bucket.Items[:len(bucket.Items)-1]
			if !bucketFound {
				bucketCache[bucketKey] = bucket
			}
		}

		newPctDone := idx * 100.0 / len(itemIDs)
		if int(newPctDone) > pctDone {
			pctDone = int(newPctDone)
			logger.Trace("bucket item removal progress", "percent", pctDone, "items_removed", idx)
		}
	}

	logger.Debug("persisting buckets", "total_buckets", len(bucketCache))

	// Persist all buckets in the cache; these will be the ones that had
	// deletions
	pctDone = 0
	idx := 0
	for _, bucket := range bucketCache {
		// Fail if the context is canceled; the storage calls would fail anyway
		if ctx.Err() != nil {
			return ctx.Err()
		}

		err = s.putBucket(ctx, bucket)
		if err != nil {
			return err
		}

		newPctDone := idx * 100.0 / len(bucketCache)
		if int(newPctDone) > pctDone {
			pctDone = int(newPctDone)
			logger.Trace("bucket persistence progress", "percent", pctDone, "buckets_persisted", idx)
		}

		idx++
	}

	return nil
}
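
// A brief usage sketch (the IDs are hypothetical); passing a logger surfaces
// the Debug/Trace progress output emitted above:
//
//	err := packer.DeleteMultipleItems(ctx, logger, "id-1", "id-2", "id-3")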

// putBucket marshals, compresses, and persists the given bucket under its key.
func (s *StoragePacker) putBucket(ctx context.Context, bucket *Bucket) error {
	if bucket == nil {
		return fmt.Errorf("nil bucket entry")
	}

	if bucket.Key == "" {
		return fmt.Errorf("missing key")
	}

	if !strings.HasPrefix(bucket.Key, s.viewPrefix) {
		return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.viewPrefix)
	}

	marshaledBucket, err := proto.Marshal(bucket)
	if err != nil {
		return errwrap.Wrapf("failed to marshal bucket: {{err}}", err)
	}

	compressedBucket, err := compressutil.Compress(marshaledBucket, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeSnappy,
	})
	if err != nil {
		return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err)
	}

	// Store the compressed value
	err = s.view.Put(ctx, &logical.StorageEntry{
		Key:   bucket.Key,
		Value: compressedBucket,
	})
	if err != nil {
		return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err)
	}

	return nil
}
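
// A round-trip sketch (bucket is hypothetical): putBucket persists
// Snappy-compressed protobuf, which GetBucket reverses via
// compressutil.Decompress and proto.Unmarshal:
//
//	if err := s.putBucket(ctx, bucket); err != nil {
//		return err
//	}
//	stored, err := s.GetBucket(bucket.Key)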

// GetItem fetches the storage entry for a given key from its corresponding
// bucket.
func (s *StoragePacker) GetItem(itemID string) (*Item, error) {
	if itemID == "" {
		return nil, fmt.Errorf("empty item ID")
	}

	bucketKey := s.BucketKey(itemID)

	// Fetch the bucket entry
	bucket, err := s.GetBucket(bucketKey)
	if err != nil {
		return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err)
	}
	if bucket == nil {
		return nil, nil
	}

	// Look for a matching storage entry in the bucket items
	for _, item := range bucket.Items {
		if item.ID == itemID {
			return item, nil
		}
	}

	return nil, nil
}

// PutItem stores the given item in its respective bucket
func (s *StoragePacker) PutItem(_ context.Context, item *Item) error {
	if item == nil {
		return fmt.Errorf("nil item")
	}

	if item.ID == "" {
		return fmt.Errorf("missing ID in item")
	}

	var err error
	bucketKey := s.BucketKey(item.ID)

	bucket := &Bucket{
		Key: bucketKey,
	}

	// The storage entry is persisted regardless of whether the storageEntry
	// read below is nil, so acquire the write lock directly, even for the
	// read.
	lock := locksutil.LockForKey(s.storageLocks, bucketKey)
	lock.Lock()
	defer lock.Unlock()

	// Check if there is an existing bucket for a given key
	storageEntry, err := s.view.Get(context.Background(), bucketKey)
	if err != nil {
		return errwrap.Wrapf("failed to read packed storage bucket entry: {{err}}", err)
	}

	if storageEntry == nil {
		// If the bucket entry does not exist, this will be the only item in
		// the bucket that is going to be persisted.
		bucket.Items = []*Item{
			item,
		}
	} else {
		uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value)
		if err != nil {
			return errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err)
		}
		if notCompressed {
			uncompressedData = storageEntry.Value
		}

		err = proto.Unmarshal(uncompressedData, bucket)
		if err != nil {
			return errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err)
		}

		err = bucket.upsert(item)
		if err != nil {
			return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err)
		}
	}

	return s.putBucket(context.Background(), bucket)
}

// NewStoragePacker creates a new storage packer for a given view
func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string) (*StoragePacker, error) {
	if view == nil {
		return nil, fmt.Errorf("nil view")
	}

	if viewPrefix == "" {
		viewPrefix = StoragePackerBucketsPrefix
	}

	if !strings.HasSuffix(viewPrefix, "/") {
		viewPrefix = viewPrefix + "/"
	}

	// Create a new packer object for the given view
	packer := &StoragePacker{
		view:         view,
		viewPrefix:   viewPrefix,
		logger:       logger,
		storageLocks: locksutil.CreateLocks(),
	}

	return packer, nil
}
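
// exampleLifecycle is a minimal usage sketch, assuming an in-memory
// logical.Storage view from the Vault SDK; the item ID used here is
// hypothetical.
func exampleLifecycle() error {
	ctx := context.Background()

	// Create a packer over an in-memory view with the default bucket prefix.
	packer, err := NewStoragePacker(&logical.InmemStorage{}, log.Default(), "")
	if err != nil {
		return err
	}

	// Store an item; it lands in the bucket selected by the first byte of
	// the MD5 hash of its ID.
	if err := packer.PutItem(ctx, &Item{ID: "example-item"}); err != nil {
		return err
	}

	// Read the item back from the same bucket.
	item, err := packer.GetItem("example-item")
	if err != nil {
		return err
	}
	if item == nil {
		return fmt.Errorf("expected item to be present")
	}

	// Remove the item from its bucket again.
	return packer.DeleteItem(ctx, "example-item")
}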