Update go-getter for minio support
commit 5bf5dae13e (parent 0d3bdf7210)
vendor/github.com/hashicorp/go-getter/README.md (generated, vendored): 19 lines changed

```diff
@@ -222,13 +222,17 @@ None
 
 ### HTTP (`http`)
 
-None
+#### Basic Authentication
+
+To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
+hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
+characters, including the username and password, must be URL encoded.
 
 ### S3 (`s3`)
 
 S3 takes various access configurations in the URL. Note that it will also
-read these from standard AWS environment variables if they're set. If
-the query parameters are present, these take priority.
+read these from standard AWS environment variables if they're set. S3 compliant servers like Minio
+are also supported. If the query parameters are present, these take priority.
 
 * `aws_access_key_id` - AWS access key.
 * `aws_access_key_secret` - AWS access key secret.
@@ -240,6 +244,14 @@ If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
 using credentials, then just omit these and the profile, if available will
 be used automatically.
 
+### Using S3 with Minio
+If you use go-getter for Minio support, you must consider the following:
+
+* `aws_access_key_id` (required) - Minio access key.
+* `aws_access_key_secret` (required) - Minio access key secret.
+* `region` (optional - defaults to us-east-1) - Region identifier to use.
+* `version` (optional - defaults to Minio default) - Configuration file format.
+
 #### S3 Bucket Examples
 
 S3 has several addressing schemes used to reference your bucket. These are
@@ -250,4 +262,5 @@ Some examples for these addressing schemes:
 - s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
 - bucket.s3.amazonaws.com/foo
 - bucket.s3-eu-west-1.amazonaws.com/foo/bar
+- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"
 
```
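
Not part of the commit: for orientation, a minimal sketch of driving the new README example from Go with go-getter's `GetFile` helper. It assumes a local Minio server on `127.0.0.1:9000` that holds `test-bucket/hello.txt`, with `KEYID`/`SECRETKEY` as placeholder credentials, mirroring the example URL added above.

```go
// Minimal sketch, assuming a local Minio endpoint and placeholder credentials.
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Forced "s3::" getter, path-style URL, credentials and region passed as
	// query parameters exactly as in the README example above.
	src := "s3::http://127.0.0.1:9000/test-bucket/hello.txt" +
		"?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"

	// GetFile downloads a single object into the given destination path.
	if err := getter.GetFile("./hello.txt", src); err != nil {
		log.Fatalf("download failed: %s", err)
	}
}
```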

vendor/github.com/hashicorp/go-getter/decompress_testing.go (generated, vendored): 9 lines changed

```diff
@@ -11,7 +11,8 @@ import (
 	"runtime"
 	"sort"
 	"strings"
-	"testing"
+
+	"github.com/mitchellh/go-testing-interface"
 )
 
 // TestDecompressCase is a single test case for testing decompressors
@@ -24,7 +25,7 @@ type TestDecompressCase struct {
 }
 
 // TestDecompressor is a helper function for testing generic decompressors.
-func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) {
+func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
 	for _, tc := range cases {
 		t.Logf("Testing: %s", tc.Input)
 
@@ -87,7 +88,7 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase)
 	}
 }
 
-func testListDir(t *testing.T, path string) []string {
+func testListDir(t testing.T, path string) []string {
 	var result []string
 	err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
 		if err != nil {
@@ -116,7 +117,7 @@ func testListDir(t *testing.T, path string) []string {
 	return result
 }
 
-func testMD5(t *testing.T, path string) string {
+func testMD5(t testing.T, path string) string {
 	f, err := os.Open(path)
 	if err != nil {
 		t.Fatalf("err: %s", err)
```
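
Not part of the commit: a sketch of what the switch from `*testing.T` to the `testing.T` interface of mitchellh/go-testing-interface buys. Real tests keep passing their `*testing.T`, which satisfies the interface, while non-test code can drive the same helpers via the package's `RuntimeT` implementation. The fixture path and test case below are hypothetical.

```go
// Sketch only: exercising go-getter's decompression test helper outside `go test`,
// assuming a hypothetical local fixture archive.
package main

import (
	getter "github.com/hashicorp/go-getter"
	testing "github.com/mitchellh/go-testing-interface"
)

func main() {
	// RuntimeT reports failures at runtime instead of through the `go test` runner.
	t := &testing.RuntimeT{}

	cases := []getter.TestDecompressCase{
		// Hypothetical fixture: an archive containing a single file.
		{Input: "./fixtures/single.tar.gz"},
	}

	getter.TestDecompressor(t, new(getter.TarGzipDecompressor), cases)
}
```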

vendor/github.com/hashicorp/go-getter/get_s3.go (generated, vendored): 35 lines changed

```diff
@@ -28,7 +28,7 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
 	}
 
 	// Create client config
-	config := g.getAWSConfig(region, creds)
+	config := g.getAWSConfig(region, u, creds)
 	sess := session.New(config)
 	client := s3.New(sess)
 
@@ -84,7 +84,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error {
 		return err
 	}
 
-	config := g.getAWSConfig(region, creds)
+	config := g.getAWSConfig(region, u, creds)
 	sess := session.New(config)
 	client := s3.New(sess)
 
@@ -139,7 +139,7 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error {
 		return err
 	}
 
-	config := g.getAWSConfig(region, creds)
+	config := g.getAWSConfig(region, u, creds)
 	sess := session.New(config)
 	client := s3.New(sess)
 	return g.getObject(client, dst, bucket, path, version)
@@ -174,7 +174,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er
 	return err
 }
 
-func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config {
 	conf := &aws.Config{}
 	if creds == nil {
 		// Grab the metadata URL
@@ -195,6 +195,14 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *
 		})
 	}
 
+	if creds != nil {
+		conf.Endpoint = &url.Host
+		conf.S3ForcePathStyle = aws.Bool(true)
+		if url.Scheme == "http" {
+			conf.DisableSSL = aws.Bool(true)
+		}
+	}
+
 	conf.Credentials = creds
 	if region != "" {
 		conf.Region = aws.String(region)
@@ -204,6 +212,10 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *
 }
 
 func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
+	// This just checks whether we are dealing with S3 or
+	// any other S3 compliant service. S3 has a predictable
+	// URL as others do not.
+	if strings.Contains(u.Host, "amazonaws.com") {
 	// Expected host style: s3.amazonaws.com. They always have 3 parts,
 	// although the first may differ if we're accessing a specific region.
 	hostParts := strings.Split(u.Host, ".")
@@ -228,6 +240,21 @@ func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, c
 	path = pathParts[2]
 	version = u.Query().Get("version")
 
+	} else {
+		pathParts := strings.SplitN(u.Path, "/", 3)
+		if len(pathParts) != 3 {
+			err = fmt.Errorf("URL is not a valid S3 compliant URL")
+			return
+		}
+		bucket = pathParts[1]
+		path = pathParts[2]
+		version = u.Query().Get("version")
+		region = u.Query().Get("region")
+		if region == "" {
+			region = "us-east-1"
+		}
+	}
+
 	_, hasAwsId := u.Query()["aws_access_key_id"]
 	_, hasAwsSecret := u.Query()["aws_access_key_secret"]
 	_, hasAwsToken := u.Query()["aws_access_token"]
```
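
Not part of the commit: in plain aws-sdk-go terms, roughly what the new branch in `getAWSConfig` sets up when credentials come from the URL of an S3-compatible server: static credentials, a custom endpoint taken from the URL host, path-style addressing, and SSL disabled only for plain-http endpoints. The endpoint, keys, and bucket below are placeholders.

```go
// Standalone sketch of the client configuration produced for an S3-compatible
// endpoint; all concrete values are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	conf := &aws.Config{
		Credentials:      credentials.NewStaticCredentials("KEYID", "SECRETKEY", ""),
		Endpoint:         aws.String("127.0.0.1:9000"), // host from the URL, as in conf.Endpoint = &url.Host
		Region:           aws.String("us-east-1"),      // parseUrl's default when no ?region= is given
		S3ForcePathStyle: aws.Bool(true),               // Minio-style path addressing: /bucket/key
		DisableSSL:       aws.Bool(true),               // only because the example URL scheme is http
	}

	client := s3.New(session.New(conf))

	out, err := client.ListObjects(&s3.ListObjectsInput{Bucket: aws.String("test-bucket")})
	if err != nil {
		log.Fatalf("list failed: %s", err)
	}
	fmt.Println("objects:", len(out.Contents))
}
```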
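
Also not part of the commit: a tiny, stdlib-only illustration of the new non-`amazonaws.com` branch in `parseUrl`. For an S3-compliant server the bucket and key come straight from the URL path, and the region falls back to `us-east-1` unless a `region` query parameter is set. The URL reuses the README's placeholder example.

```go
// Illustration of the path-style parsing used for S3-compliant servers.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	u, err := url.Parse("http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY")
	if err != nil {
		panic(err)
	}

	// SplitN on "/" with a limit of 3 turns "/test-bucket/hello.txt" into
	// ["", "test-bucket", "hello.txt"]: index 1 is the bucket, index 2 is the
	// object key (which may itself contain further "/").
	pathParts := strings.SplitN(u.Path, "/", 3)
	bucket, key := pathParts[1], pathParts[2]

	region := u.Query().Get("region")
	if region == "" {
		region = "us-east-1" // parseUrl's default for S3-compliant servers
	}

	fmt.Println(bucket, key, region) // test-bucket hello.txt us-east-1
}
```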

vendor/vendor.json (vendored): 10 lines changed

```diff
@@ -689,16 +689,16 @@
 			"revisionTime": "2017-06-02T22:43:19Z"
 		},
 		{
-			"checksumSHA1": "hHoNT6CfqcDF1+yXRKnz/duuwIk=",
+			"checksumSHA1": "Sozy3aNAPBleUfiECj0jnaYjw2k=",
 			"path": "github.com/hashicorp/go-getter",
-			"revision": "e48f67b534e614bf7fbd978fd0020f61a17b7527",
-			"revisionTime": "2017-04-05T22:15:29Z"
+			"revision": "2814e6fb2ca5b3bd950c97eff22553ecb3c7f77b",
+			"revisionTime": "2017-07-06T02:51:20Z"
 		},
 		{
 			"checksumSHA1": "9J+kDr29yDrwsdu2ULzewmqGjpA=",
 			"path": "github.com/hashicorp/go-getter/helper/url",
-			"revision": "e48f67b534e614bf7fbd978fd0020f61a17b7527",
-			"revisionTime": "2017-04-05T22:15:29Z"
+			"revision": "2814e6fb2ca5b3bd950c97eff22553ecb3c7f77b",
+			"revisionTime": "2017-07-06T02:51:20Z"
 		},
 		{
 			"checksumSHA1": "zvmksNyW6g+Fd/bywd4vcn8rp+M=",
```