2017-08-03 17:24:27 +00:00
|
|
|
package s3
|
2015-05-20 14:54:26 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
2017-03-29 17:27:27 +00:00
|
|
|
"net/http"
|
2015-05-20 14:54:26 +00:00
|
|
|
"os"
|
|
|
|
"sort"
|
2017-03-26 18:32:26 +00:00
|
|
|
"strconv"
|
2015-05-20 14:54:26 +00:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
log "github.com/mgutz/logxi/v1"
|
|
|
|
|
2015-05-20 14:54:26 +00:00
|
|
|
"github.com/armon/go-metrics"
|
2015-06-03 19:02:49 +00:00
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
2015-10-30 22:22:48 +00:00
|
|
|
"github.com/aws/aws-sdk-go/aws/session"
|
2015-06-03 19:02:49 +00:00
|
|
|
"github.com/aws/aws-sdk-go/service/s3"
|
2017-03-26 18:32:26 +00:00
|
|
|
"github.com/hashicorp/errwrap"
|
2017-03-29 17:27:27 +00:00
|
|
|
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
2016-05-03 21:00:16 +00:00
|
|
|
"github.com/hashicorp/vault/helper/awsutil"
|
2017-03-29 17:27:27 +00:00
|
|
|
"github.com/hashicorp/vault/helper/consts"
|
2017-08-03 17:24:27 +00:00
|
|
|
"github.com/hashicorp/vault/physical"
|
2015-05-20 14:54:26 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// S3Backend is a physical backend that stores data
// within an S3 bucket.
type S3Backend struct {
	// bucket is the name of the S3 bucket all entries are stored in.
	bucket string
	// client is the AWS SDK S3 client used for every bucket operation.
	client *s3.S3
	// logger emits operational/debug messages for this backend.
	logger log.Logger
	// permitPool bounds the number of concurrent outstanding S3
	// requests (sized from the "max_parallel" config value).
	permitPool *physical.PermitPool
}
|
|
|
|
|
2017-08-03 17:24:27 +00:00
|
|
|
// NewS3Backend constructs a S3 backend using a pre-existing
|
2015-05-20 14:54:26 +00:00
|
|
|
// bucket. Credentials can be provided to the backend, sourced
|
|
|
|
// from the environment, AWS credential files or by IAM role.
|
2017-08-03 17:24:27 +00:00
|
|
|
func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
|
2015-12-17 15:19:42 +00:00
|
|
|
bucket := os.Getenv("AWS_S3_BUCKET")
|
|
|
|
if bucket == "" {
|
|
|
|
bucket = conf["bucket"]
|
2015-11-06 13:05:29 +00:00
|
|
|
if bucket == "" {
|
|
|
|
return nil, fmt.Errorf("'bucket' must be set")
|
|
|
|
}
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
|
|
|
|
2016-05-03 21:00:16 +00:00
|
|
|
accessKey, ok := conf["access_key"]
|
2015-05-20 14:54:26 +00:00
|
|
|
if !ok {
|
2016-05-03 21:00:16 +00:00
|
|
|
accessKey = ""
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
2016-05-03 21:00:16 +00:00
|
|
|
secretKey, ok := conf["secret_key"]
|
2015-05-20 14:54:26 +00:00
|
|
|
if !ok {
|
2016-05-03 21:00:16 +00:00
|
|
|
secretKey = ""
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
2016-05-03 21:00:16 +00:00
|
|
|
sessionToken, ok := conf["session_token"]
|
2015-08-06 16:37:08 +00:00
|
|
|
if !ok {
|
2016-05-03 21:00:16 +00:00
|
|
|
sessionToken = ""
|
2015-08-06 16:37:08 +00:00
|
|
|
}
|
2015-12-17 15:19:42 +00:00
|
|
|
endpoint := os.Getenv("AWS_S3_ENDPOINT")
|
|
|
|
if endpoint == "" {
|
|
|
|
endpoint = conf["endpoint"]
|
2015-11-04 12:34:40 +00:00
|
|
|
}
|
2017-07-31 22:27:16 +00:00
|
|
|
region := os.Getenv("AWS_REGION")
|
2015-12-17 15:19:42 +00:00
|
|
|
if region == "" {
|
2017-07-31 22:27:16 +00:00
|
|
|
region = os.Getenv("AWS_DEFAULT_REGION")
|
2015-05-20 14:54:26 +00:00
|
|
|
if region == "" {
|
2017-07-31 22:27:16 +00:00
|
|
|
region = conf["region"]
|
|
|
|
if region == "" {
|
|
|
|
region = "us-east-1"
|
|
|
|
}
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-03 21:00:16 +00:00
|
|
|
credsConfig := &awsutil.CredentialsConfig{
|
|
|
|
AccessKey: accessKey,
|
|
|
|
SecretKey: secretKey,
|
|
|
|
SessionToken: sessionToken,
|
|
|
|
}
|
|
|
|
creds, err := credsConfig.GenerateCredentialChain()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2015-05-20 14:54:26 +00:00
|
|
|
|
2017-03-29 17:27:27 +00:00
|
|
|
pooledTransport := cleanhttp.DefaultPooledTransport()
|
|
|
|
pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
|
|
|
|
|
2015-10-30 22:22:48 +00:00
|
|
|
s3conn := s3.New(session.New(&aws.Config{
|
2015-05-20 14:54:26 +00:00
|
|
|
Credentials: creds,
|
2017-03-29 17:27:27 +00:00
|
|
|
HTTPClient: &http.Client{
|
|
|
|
Transport: pooledTransport,
|
|
|
|
},
|
|
|
|
Endpoint: aws.String(endpoint),
|
|
|
|
Region: aws.String(region),
|
2015-10-30 22:22:48 +00:00
|
|
|
}))
|
2015-05-20 14:54:26 +00:00
|
|
|
|
2017-06-20 00:16:41 +00:00
|
|
|
_, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
|
2015-05-20 14:54:26 +00:00
|
|
|
if err != nil {
|
2017-06-20 00:16:41 +00:00
|
|
|
return nil, fmt.Errorf("unable to access bucket '%s' in region %s: %v", bucket, region, err)
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
|
|
|
|
2017-03-26 18:32:26 +00:00
|
|
|
maxParStr, ok := conf["max_parallel"]
|
|
|
|
var maxParInt int
|
|
|
|
if ok {
|
|
|
|
maxParInt, err = strconv.Atoi(maxParStr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
|
|
|
|
}
|
|
|
|
if logger.IsDebug() {
|
|
|
|
logger.Debug("s3: max_parallel set", "max_parallel", maxParInt)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 14:54:26 +00:00
|
|
|
s := &S3Backend{
|
2017-03-29 17:27:27 +00:00
|
|
|
client: s3conn,
|
|
|
|
bucket: bucket,
|
|
|
|
logger: logger,
|
2017-08-03 17:24:27 +00:00
|
|
|
permitPool: physical.NewPermitPool(maxParInt),
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
|
|
|
return s, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Put is used to insert or update an entry
|
2017-08-03 17:24:27 +00:00
|
|
|
func (s *S3Backend) Put(entry *physical.Entry) error {
|
2015-05-20 14:54:26 +00:00
|
|
|
defer metrics.MeasureSince([]string{"s3", "put"}, time.Now())
|
|
|
|
|
2017-03-26 18:32:26 +00:00
|
|
|
s.permitPool.Acquire()
|
|
|
|
defer s.permitPool.Release()
|
|
|
|
|
2015-05-20 14:54:26 +00:00
|
|
|
_, err := s.client.PutObject(&s3.PutObjectInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(entry.Key),
|
|
|
|
Body: bytes.NewReader(entry.Value),
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get is used to fetch an entry
|
2017-08-03 17:24:27 +00:00
|
|
|
func (s *S3Backend) Get(key string) (*physical.Entry, error) {
|
2015-05-20 14:54:26 +00:00
|
|
|
defer metrics.MeasureSince([]string{"s3", "get"}, time.Now())
|
|
|
|
|
2017-03-26 18:32:26 +00:00
|
|
|
s.permitPool.Acquire()
|
|
|
|
defer s.permitPool.Release()
|
|
|
|
|
2015-05-20 14:54:26 +00:00
|
|
|
resp, err := s.client.GetObject(&s3.GetObjectInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(key),
|
|
|
|
})
|
2015-05-21 20:15:21 +00:00
|
|
|
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
2015-05-20 14:54:26 +00:00
|
|
|
// Return nil on 404s, error on anything else
|
2015-05-21 20:15:21 +00:00
|
|
|
if awsErr.StatusCode() == 404 {
|
2015-05-20 14:54:26 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
2017-03-26 18:32:26 +00:00
|
|
|
return nil, err
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
2016-04-22 18:07:59 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if resp == nil {
|
|
|
|
return nil, fmt.Errorf("got nil response from S3 but no error")
|
|
|
|
}
|
2015-05-20 14:54:26 +00:00
|
|
|
|
|
|
|
data := make([]byte, *resp.ContentLength)
|
|
|
|
_, err = io.ReadFull(resp.Body, data)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-08-03 17:24:27 +00:00
|
|
|
ent := &physical.Entry{
|
2015-05-20 14:54:26 +00:00
|
|
|
Key: key,
|
|
|
|
Value: data,
|
|
|
|
}
|
|
|
|
|
|
|
|
return ent, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete is used to permanently delete an entry
|
|
|
|
func (s *S3Backend) Delete(key string) error {
|
|
|
|
defer metrics.MeasureSince([]string{"s3", "delete"}, time.Now())
|
|
|
|
|
2017-03-26 18:32:26 +00:00
|
|
|
s.permitPool.Acquire()
|
|
|
|
defer s.permitPool.Release()
|
|
|
|
|
2015-05-20 23:53:35 +00:00
|
|
|
_, err := s.client.DeleteObject(&s3.DeleteObjectInput{
|
2015-05-20 14:54:26 +00:00
|
|
|
Bucket: aws.String(s.bucket),
|
2015-05-20 23:53:35 +00:00
|
|
|
Key: aws.String(key),
|
2015-05-20 14:54:26 +00:00
|
|
|
})
|
2015-05-20 23:53:35 +00:00
|
|
|
|
2015-05-20 14:54:26 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// List is used to list all the keys under a given
|
|
|
|
// prefix, up to the next prefix.
|
|
|
|
func (s *S3Backend) List(prefix string) ([]string, error) {
|
|
|
|
defer metrics.MeasureSince([]string{"s3", "list"}, time.Now())
|
|
|
|
|
2017-03-26 18:32:26 +00:00
|
|
|
s.permitPool.Acquire()
|
|
|
|
defer s.permitPool.Release()
|
|
|
|
|
2017-01-03 16:15:48 +00:00
|
|
|
params := &s3.ListObjectsV2Input{
|
2017-06-16 15:09:15 +00:00
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Prefix: aws.String(prefix),
|
|
|
|
Delimiter: aws.String("/"),
|
2016-04-22 18:07:59 +00:00
|
|
|
}
|
2015-05-20 14:54:26 +00:00
|
|
|
|
|
|
|
keys := []string{}
|
2017-01-03 16:15:48 +00:00
|
|
|
|
|
|
|
err := s.client.ListObjectsV2Pages(params,
|
|
|
|
func(page *s3.ListObjectsV2Output, lastPage bool) bool {
|
2017-06-05 14:54:26 +00:00
|
|
|
if page != nil {
|
2017-06-16 15:09:15 +00:00
|
|
|
// Add truncated 'folder' paths
|
|
|
|
for _, commonPrefix := range page.CommonPrefixes {
|
|
|
|
// Avoid panic
|
|
|
|
if commonPrefix == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
commonPrefix := strings.TrimPrefix(*commonPrefix.Prefix, prefix)
|
|
|
|
keys = append(keys, commonPrefix)
|
|
|
|
}
|
|
|
|
// Add objects only from the current 'folder'
|
2017-06-05 14:54:26 +00:00
|
|
|
for _, key := range page.Contents {
|
|
|
|
// Avoid panic
|
|
|
|
if key == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
key := strings.TrimPrefix(*key.Key, prefix)
|
2017-06-16 15:09:15 +00:00
|
|
|
keys = append(keys, key)
|
2017-01-03 16:15:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2015-05-20 14:54:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
sort.Strings(keys)
|
|
|
|
|
|
|
|
return keys, nil
|
|
|
|
}
|