package gcs

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/errwrap"
	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/helper/useragent"
	"github.com/hashicorp/vault/physical"

	"cloud.google.com/go/storage"
	"github.com/armon/go-metrics"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

// Verify Backend satisfies the correct interfaces
var _ physical.Backend = (*Backend)(nil)

const (
	// envBucket is the name of the environment variable to search for the
	// storage bucket name.
	envBucket = "GOOGLE_STORAGE_BUCKET"

	// envChunkSize is the name of the environment variable to search for the
	// chunk size for requests.
	envChunkSize = "GOOGLE_STORAGE_CHUNK_SIZE"

	// envHAEnabled is the name of the environment variable to search for the
	// boolean indicating if HA is enabled.
	envHAEnabled = "GOOGLE_STORAGE_HA_ENABLED"

	// defaultChunkSize is the default chunk size in kilobytes the writer will
	// attempt to send in a single request; it is converted to bytes before
	// being handed to the API.
	defaultChunkSize = "8192"

	// objectDelimiter is the string to use to delimit objects.
	objectDelimiter = "/"
)
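
// The same settings can come from the environment instead of the config map;
// a hypothetical shell setup (values are made up):
//
//	export GOOGLE_STORAGE_BUCKET="vault-data"
//	export GOOGLE_STORAGE_CHUNK_SIZE="1024"
//	export GOOGLE_STORAGE_HA_ENABLED="true"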

var (
	// metricDelete is the key for the metric for measuring a Delete call.
	metricDelete = []string{"gcs", "delete"}

	// metricGet is the key for the metric for measuring a Get call.
	metricGet = []string{"gcs", "get"}

	// metricList is the key for the metric for measuring a List call.
	metricList = []string{"gcs", "list"}

	// metricPut is the key for the metric for measuring a Put call.
	metricPut = []string{"gcs", "put"}
)

// Backend implements physical.Backend and describes the steps necessary to
// persist data in Google Cloud Storage.
type Backend struct {
	// bucket is the name of the bucket to use for data storage and retrieval.
	bucket string

	// chunkSize is the chunk size to use for requests.
	chunkSize int

	// client is the underlying API client for talking to gcs.
	client *storage.Client

	// haEnabled indicates if HA is enabled.
	haEnabled bool

	// logger and permitPool are internal constructs
	logger     log.Logger
	permitPool *physical.PermitPool
}
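
// A hypothetical construction for illustration only (the bucket name and
// values here are made up):
//
//	b, err := NewBackend(map[string]string{
//		"bucket":       "my-vault-bucket",
//		"chunk_size":   "1024",
//		"max_parallel": "128",
//	}, log.Default())
//	if err != nil {
//		// handle the configuration error
//	}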

// NewBackend constructs a Google Cloud Storage backend with the given
// configuration. This uses the official Golang Cloud SDK and therefore
// supports specifying credentials via environment variables, credential
// files, and so on.
func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error) {
	logger.Debug("physical/gcs: configuring backend")

	// Bucket name
	bucket := os.Getenv(envBucket)
	if bucket == "" {
		bucket = c["bucket"]
	}
	if bucket == "" {
		return nil, errors.New("missing bucket name")
	}

	// Chunk size
	chunkSizeStr := os.Getenv(envChunkSize)
	if chunkSizeStr == "" {
		chunkSizeStr = c["chunk_size"]
	}
	if chunkSizeStr == "" {
		chunkSizeStr = defaultChunkSize
	}
	chunkSize, err := strconv.Atoi(chunkSizeStr)
	if err != nil {
		return nil, errwrap.Wrapf("failed to parse chunk_size: {{err}}", err)
	}

	// Values are specified as kb, but the API expects them as bytes.
	chunkSize = chunkSize * 1024
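	// For example, the default of "8192" becomes 8192 * 1024 = 8,388,608
	// bytes (8 MiB) after this conversion.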

	// HA configuration
	haEnabled := false
	haEnabledStr := os.Getenv(envHAEnabled)
	if haEnabledStr == "" {
		haEnabledStr = c["ha_enabled"]
	}
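	// strconv.ParseBool accepts "1", "t", "T", "TRUE", "true", "True", "0",
	// "f", "F", "FALSE", "false", and "False"; anything else is an error.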
	if haEnabledStr != "" {
		var err error
		haEnabled, err = strconv.ParseBool(haEnabledStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err)
		}
	}

	// Max parallel
	maxParallel, err := extractInt(c["max_parallel"])
	if err != nil {
		return nil, errwrap.Wrapf("failed to parse max_parallel: {{err}}", err)
	}

	logger.Debug("physical/gcs: configuration",
		"bucket", bucket,
		"chunk_size", chunkSize,
		"ha_enabled", haEnabled,
		"max_parallel", maxParallel,
	)
	logger.Debug("physical/gcs: creating client")

	// Client
	opts := []option.ClientOption{option.WithUserAgent(useragent.String())}
	if credentialsFile := c["credentials_file"]; credentialsFile != "" {
		logger.Warn("physical/gcs: specifying credentials_file as an option is " +
			"deprecated. Please use the GOOGLE_APPLICATION_CREDENTIALS environment " +
			"variable or instance credentials instead.")
		opts = append(opts, option.WithServiceAccountFile(credentialsFile))
	}
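
	// When no explicit credentials are given, the client falls back to
	// Application Default Credentials, e.g. (hypothetical path):
	//
	//	export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"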
	ctx := context.Background()
	client, err := storage.NewClient(ctx, opts...)
	if err != nil {
		return nil, errwrap.Wrapf("failed to create storage client: {{err}}", err)
	}

	return &Backend{
		bucket:    bucket,
		chunkSize: chunkSize,
		haEnabled: haEnabled,

		client:     client,
		permitPool: physical.NewPermitPool(maxParallel),
		logger:     logger,
	}, nil
}

// Put is used to insert or update an entry
func (b *Backend) Put(ctx context.Context, entry *physical.Entry) error {
	defer metrics.MeasureSince(metricPut, time.Now())

	// Pooling
	b.permitPool.Acquire()
	defer b.permitPool.Release()

	// Insert
	w := b.client.Bucket(b.bucket).Object(entry.Key).NewWriter(ctx)
	w.ChunkSize = b.chunkSize
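	// storage.Writer buffers data in ChunkSize-sized requests; a ChunkSize of
	// zero would send the value in a single request.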

	if _, err := w.Write(entry.Value); err != nil {
		w.Close()
		return errwrap.Wrapf("failed to put data: {{err}}", err)
	}

	// The object is not committed until the writer is closed successfully, so
	// Close's error must be checked rather than discarded by a defer.
	if err := w.Close(); err != nil {
		return errwrap.Wrapf("failed to put data: {{err}}", err)
	}
	return nil
}

// Get fetches an entry. If no entry exists, this function returns nil.
func (b *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) {
	defer metrics.MeasureSince(metricGet, time.Now())

	// Pooling
	b.permitPool.Acquire()
	defer b.permitPool.Release()

	// Read
	r, err := b.client.Bucket(b.bucket).Object(key).NewReader(ctx)
	if err == storage.ErrObjectNotExist {
		return nil, nil
	}
	if err != nil {
		return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", key), err)
	}
	defer r.Close()

	value, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, errwrap.Wrapf("failed to read value: {{err}}", err)
	}

	return &physical.Entry{
		Key:   key,
		Value: value,
	}, nil
}

// Delete deletes an entry with the given key
func (b *Backend) Delete(ctx context.Context, key string) error {
	defer metrics.MeasureSince(metricDelete, time.Now())

	// Pooling
	b.permitPool.Acquire()
	defer b.permitPool.Release()

	// Delete
	err := b.client.Bucket(b.bucket).Object(key).Delete(ctx)
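	// A missing object is treated as success, which makes Delete idempotent.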
	if err != nil && err != storage.ErrObjectNotExist {
		return errwrap.Wrapf(fmt.Sprintf("failed to delete key %q: {{err}}", key), err)
	}
	return nil
}

// List is used to list all the keys under a given prefix, up to the next
// prefix.
func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) {
	defer metrics.MeasureSince(metricList, time.Now())

	// Pooling
	b.permitPool.Acquire()
	defer b.permitPool.Release()

	iter := b.client.Bucket(b.bucket).Objects(ctx, &storage.Query{
		Prefix:    prefix,
		Delimiter: objectDelimiter,
		Versions:  false,
	})
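	// With a delimiter set, objects nested below the next "/" are collapsed
	// into a single synthetic prefix entry, emulating a shallow directory
	// listing.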

	keys := []string{}

	for {
		objAttrs, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, errwrap.Wrapf("failed to read object: {{err}}", err)
		}

		var path string
		if objAttrs.Prefix != "" {
			// "subdirectory"
			path = objAttrs.Prefix
		} else {
			// file
			path = objAttrs.Name
		}

		// get relative file/dir just like "basename"
		key := strings.TrimPrefix(path, prefix)
		keys = append(keys, key)
	}
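
	// For example (hypothetical keys): with prefix "foo/", the objects
	// "foo/bar" and "foo/baz/qux" list as "bar" and "baz/" respectively.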
	sort.Strings(keys)

	return keys, nil
}

// extractInt is a helper function that takes a string and converts that
// string to an int, but accounts for the empty string.
func extractInt(s string) (int, error) {
	if s == "" {
		return 0, nil
	}
	return strconv.Atoi(s)
}
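
// For example, extractInt("") returns (0, nil), extractInt("128") returns
// (128, nil), and extractInt("abc") returns strconv.Atoi's parse error.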