govendor fetch github.com/hashicorp/go-getter@6be654f

commit f807d9208f
parent 2999d5e2b2
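Updates the vendored github.com/hashicorp/go-getter from v1.1.0 (revision 9363991638334fdee4f649819138250a3b6402c8) to revision 6be654f. The new revision adds a Umask field to getter.Client and threads it through copyDir, the file, HTTP, and S3 getters, and every Decompressor, so files and directories created during a fetch respect the configured umask; it also moves the BitBucket detector from the 1.0 to the 2.0 repositories API.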
@@ -19,7 +19,7 @@ import (
 // Using a client directly allows more fine-grained control over how downloading
 // is done, as well as customizing the protocols supported.
 type Client struct {
-    // Ctx for cancellation
+    // Ctx for cancellation
     Ctx context.Context
 
     // Src is the source URL to get.
@@ -39,6 +39,10 @@ type Client struct {
     // for documentation.
     Mode ClientMode
 
+    // Umask is used to mask file permissions when storing local files or decompressing
+    // an archive
+    Umask os.FileMode
+
     // Detectors is the list of detectors that are tried on the source.
     // If this is nil, then the default Detectors will be used.
     Detectors []Detector
@@ -66,6 +70,20 @@ type Client struct {
     Options []ClientOption
 }
 
+// umask returns the effective umask for the Client, defaulting to the process umask
+func (c *Client) umask() os.FileMode {
+    if c == nil {
+        return 0
+    }
+    return c.Umask
+}
+
+// mode returns file mode umasked by the Client umask
+func (c *Client) mode(mode os.FileMode) os.FileMode {
+    m := mode & ^c.umask()
+    return m
+}
+
 // Get downloads the configured source to the destination.
 func (c *Client) Get() error {
     if err := c.Configure(c.Options...); err != nil {
@@ -233,7 +251,7 @@ func (c *Client) Get() error {
     if decompressor != nil {
         // We have a decompressor, so decompress the current destination
         // into the final destination with the proper mode.
-        err := decompressor.Decompress(decompressDst, dst, decompressDir)
+        err := decompressor.Decompress(decompressDst, dst, decompressDir, c.umask())
         if err != nil {
             return err
         }
@@ -281,7 +299,7 @@ func (c *Client) Get() error {
         if err := os.RemoveAll(realDst); err != nil {
            return err
         }
-        if err := os.MkdirAll(realDst, 0755); err != nil {
+        if err := os.MkdirAll(realDst, c.mode(0755)); err != nil {
            return err
         }
 
@@ -291,7 +309,7 @@ func (c *Client) Get() error {
            return err
         }
 
-        return copyDir(c.Ctx, realDst, subDir, false)
+        return copyDir(c.Ctx, realDst, subDir, false, c.umask())
     }
 
     return nil
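The masking added above is plain bit clearing: mode & ^umask in Go is the same as mode &^ umask, so any permission bits present in the umask are removed from the requested mode. A minimal standalone sketch of that arithmetic (applyUmask is an illustrative name, not a go-getter function):

// umask_sketch.go - not part of go-getter; demonstrates the masking used by Client.mode().
package main

import (
    "fmt"
    "os"
)

// applyUmask mirrors the mode()/umask() helpers above: clear the umask bits
// from the requested permission bits.
func applyUmask(m, umask os.FileMode) os.FileMode {
    return m &^ umask
}

func main() {
    fmt.Printf("%o\n", applyUmask(0755, 0))    // 755: a zero umask leaves the mode unchanged
    fmt.Printf("%o\n", applyUmask(0666, 0022)) // 644: group/other write bits are cleared
    fmt.Printf("%o\n", applyUmask(0777, 0077)) // 700: only the owner bits survive
}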
@@ -7,11 +7,16 @@ import (
     "strings"
 )
 
+// mode returns the file mode masked by the umask
+func mode(mode, umask os.FileMode) os.FileMode {
+    return mode & ^umask
+}
+
 // copyDir copies the src directory contents into dst. Both directories
 // should already exist.
 //
 // If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
-func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error {
+func copyDir(ctx context.Context, dst string, src string, ignoreDot bool, umask os.FileMode) error {
     src, err := filepath.EvalSymlinks(src)
     if err != nil {
         return err
@@ -46,7 +51,7 @@ func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error
             return nil
         }
 
-        if err := os.MkdirAll(dstPath, 0755); err != nil {
+        if err := os.MkdirAll(dstPath, mode(0755, umask)); err != nil {
             return err
         }
 
@@ -54,24 +59,8 @@ func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error
         }
 
         // If we have a file, copy the contents.
-        srcF, err := os.Open(path)
-        if err != nil {
-            return err
-        }
-        defer srcF.Close()
-
-        dstF, err := os.Create(dstPath)
-        if err != nil {
-            return err
-        }
-        defer dstF.Close()
-
-        if _, err := Copy(ctx, dstF, srcF); err != nil {
-            return err
-        }
-
-        // Chmod it
-        return os.Chmod(dstPath, info.Mode())
+        _, err = copyFile(ctx, dstPath, path, info.Mode(), umask)
+        return err
     }
 
     return filepath.Walk(src, walkFn)
@@ -1,6 +1,7 @@
 package getter
 
 import (
+    "os"
     "strings"
 )
 
@@ -14,7 +15,7 @@ type Decompressor interface {
     // Decompress should decompress src to dst. dir specifies whether dst
     // is a directory or single file. src is guaranteed to be a single file
     // that exists. dst is not guaranteed to exist already.
-    Decompress(dst, src string, dir bool) error
+    Decompress(dst, src string, dir bool, umask os.FileMode) error
 }
 
 // Decompressors is the mapping of extension to the Decompressor implementation
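Adding the umask parameter changes the exported Decompressor interface, so any third-party implementation has to adopt the four-argument signature as well. A hedged sketch of what an external implementation looks like after this change (the noop type and the "noop" extension key are made up for illustration):

package main

import (
    "os"

    getter "github.com/hashicorp/go-getter"
)

// noopDecompressor is a stand-in implementation that only creates an empty
// destination file; the point is the post-change method signature.
type noopDecompressor struct{}

func (d *noopDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
    f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666&^umask)
    if err != nil {
        return err
    }
    return f.Close()
}

func main() {
    // Decompressors maps archive extensions to implementations; a custom
    // entry now has to satisfy the new four-argument interface to compile.
    getter.Decompressors["noop"] = new(noopDecompressor)
}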
@@ -3,7 +3,6 @@ package getter
 import (
     "compress/bzip2"
     "fmt"
-    "io"
     "os"
     "path/filepath"
 )
@@ -12,14 +11,14 @@ import (
 // decompress bz2 files.
 type Bzip2Decompressor struct{}
 
-func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
+func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // Directory isn't supported at all
     if dir {
         return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
     }
 
     // If we're going into a directory we should make that first
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil {
         return err
     }
 
@@ -34,12 +33,5 @@ func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
     bzipR := bzip2.NewReader(f)
 
     // Copy it out
-    dstF, err := os.Create(dst)
-    if err != nil {
-        return err
-    }
-    defer dstF.Close()
-
-    _, err = io.Copy(dstF, bzipR)
-    return err
+    return copyReader(dst, bzipR, 0622, umask)
 }
@@ -3,7 +3,6 @@ package getter
 import (
     "compress/gzip"
     "fmt"
-    "io"
     "os"
     "path/filepath"
 )
@@ -12,14 +11,14 @@ import (
 // decompress gzip files.
 type GzipDecompressor struct{}
 
-func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
+func (d *GzipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // Directory isn't supported at all
     if dir {
         return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
     }
 
     // If we're going into a directory we should make that first
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil {
         return err
     }
 
@@ -38,12 +37,5 @@ func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
     defer gzipR.Close()
 
     // Copy it out
-    dstF, err := os.Create(dst)
-    if err != nil {
-        return err
-    }
-    defer dstF.Close()
-
-    _, err = io.Copy(dstF, gzipR)
-    return err
+    return copyReader(dst, gzipR, 0622, umask)
 }
@@ -11,7 +11,7 @@ import (
 
 // untar is a shared helper for untarring an archive. The reader should provide
 // an uncompressed view of the tar archive.
-func untar(input io.Reader, dst, src string, dir bool) error {
+func untar(input io.Reader, dst, src string, dir bool, umask os.FileMode) error {
     tarR := tar.NewReader(input)
     done := false
     dirHdrs := []*tar.Header{}
@@ -51,7 +51,7 @@ func untar(input io.Reader, dst, src string, dir bool) error {
         }
 
         // A directory, just make the directory and continue unarchiving...
-        if err := os.MkdirAll(path, 0755); err != nil {
+        if err := os.MkdirAll(path, mode(0755, umask)); err != nil {
             return err
         }
 
@@ -67,7 +67,7 @@ func untar(input io.Reader, dst, src string, dir bool) error {
 
         // Check that the directory exists, otherwise create it
         if _, err := os.Stat(dstPath); os.IsNotExist(err) {
-            if err := os.MkdirAll(dstPath, 0755); err != nil {
+            if err := os.MkdirAll(dstPath, mode(0755, umask)); err != nil {
                 return err
             }
         }
@@ -82,20 +82,10 @@ func untar(input io.Reader, dst, src string, dir bool) error {
         done = true
 
         // Open the file for writing
-        dstF, err := os.Create(path)
+        err = copyReader(path, tarR, hdr.FileInfo().Mode(), umask)
         if err != nil {
             return err
         }
-        _, err = io.Copy(dstF, tarR)
-        dstF.Close()
-        if err != nil {
-            return err
-        }
-
-        // Chmod the file
-        if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
-            return err
-        }
 
         // Set the access and modification time if valid, otherwise default to current time
         aTime := now
@@ -115,7 +105,7 @@ func untar(input io.Reader, dst, src string, dir bool) error {
     for _, dirHdr := range dirHdrs {
         path := filepath.Join(dst, dirHdr.Name)
         // Chmod the directory since they might be created before we know the mode flags
-        if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil {
+        if err := os.Chmod(path, mode(dirHdr.FileInfo().Mode(), umask)); err != nil {
             return err
         }
         // Set the mtime/atime attributes since they would have been changed during extraction
@@ -139,13 +129,13 @@ func untar(input io.Reader, dst, src string, dir bool) error {
 // unpack tar files.
 type tarDecompressor struct{}
 
-func (d *tarDecompressor) Decompress(dst, src string, dir bool) error {
+func (d *tarDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // If we're going into a directory we should make that first
     mkdir := dst
     if !dir {
         mkdir = filepath.Dir(dst)
     }
-    if err := os.MkdirAll(mkdir, 0755); err != nil {
+    if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil {
         return err
     }
 
@@ -156,5 +146,5 @@ func (d *tarDecompressor) Decompress(dst, src string, dir bool) error {
     }
     defer f.Close()
 
-    return untar(f, dst, src, dir)
+    return untar(f, dst, src, dir, umask)
 }
@@ -10,13 +10,13 @@ import (
 // decompress tar.bz2 files.
 type TarBzip2Decompressor struct{}
 
-func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
+func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // If we're going into a directory we should make that first
     mkdir := dst
     if !dir {
         mkdir = filepath.Dir(dst)
     }
-    if err := os.MkdirAll(mkdir, 0755); err != nil {
+    if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil {
         return err
     }
 
@@ -29,5 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
 
     // Bzip2 compression is second
     bzipR := bzip2.NewReader(f)
-    return untar(bzipR, dst, src, dir)
+    return untar(bzipR, dst, src, dir, umask)
 }
@@ -47,7 +47,7 @@ func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
         defer os.RemoveAll(td)
 
         // Decompress
-        err := d.Decompress(dst, tc.Input, tc.Dir)
+        err := d.Decompress(dst, tc.Input, tc.Dir, 0022)
         if (err != nil) != tc.Err {
             t.Fatalf("err %s: %s", tc.Input, err)
         }
@@ -11,13 +11,13 @@ import (
 // decompress tar.gzip files.
 type TarGzipDecompressor struct{}
 
-func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // If we're going into a directory we should make that first
     mkdir := dst
     if !dir {
         mkdir = filepath.Dir(dst)
     }
-    if err := os.MkdirAll(mkdir, 0755); err != nil {
+    if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil {
         return err
     }
 
@@ -35,5 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
     }
     defer gzipR.Close()
 
-    return untar(gzipR, dst, src, dir)
+    return untar(gzipR, dst, src, dir, umask)
 }
@@ -12,13 +12,13 @@ import (
 // decompress tar.xz files.
 type TarXzDecompressor struct{}
 
-func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error {
+func (d *TarXzDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // If we're going into a directory we should make that first
     mkdir := dst
     if !dir {
         mkdir = filepath.Dir(dst)
     }
-    if err := os.MkdirAll(mkdir, 0755); err != nil {
+    if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil {
         return err
     }
 
@@ -35,5 +35,5 @@ func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error {
         return fmt.Errorf("Error opening an xz reader for %s: %s", src, err)
     }
 
-    return untar(txzR, dst, src, dir)
+    return untar(txzR, dst, src, dir, umask)
 }
@@ -2,7 +2,6 @@ package getter
 
 import (
     "fmt"
-    "io"
     "os"
     "path/filepath"
 
@@ -13,14 +12,14 @@ import (
 // decompress xz files.
 type XzDecompressor struct{}
 
-func (d *XzDecompressor) Decompress(dst, src string, dir bool) error {
+func (d *XzDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // Directory isn't supported at all
     if dir {
         return fmt.Errorf("xz-compressed files can only unarchive to a single file")
     }
 
     // If we're going into a directory we should make that first
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil {
         return err
     }
 
@@ -38,12 +37,5 @@ func (d *XzDecompressor) Decompress(dst, src string, dir bool) error {
     }
 
     // Copy it out
-    dstF, err := os.Create(dst)
-    if err != nil {
-        return err
-    }
-    defer dstF.Close()
-
-    _, err = io.Copy(dstF, xzR)
-    return err
+    return copyReader(dst, xzR, 0622, umask)
 }
@@ -3,7 +3,6 @@ package getter
 import (
     "archive/zip"
     "fmt"
-    "io"
     "os"
     "path/filepath"
 )
@@ -12,13 +11,13 @@ import (
 // decompress tar.gzip files.
 type ZipDecompressor struct{}
 
-func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
     // If we're going into a directory we should make that first
     mkdir := dst
     if !dir {
         mkdir = filepath.Dir(dst)
     }
-    if err := os.MkdirAll(mkdir, 0755); err != nil {
+    if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil {
         return err
     }
 
@@ -56,7 +55,7 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
         }
 
         // A directory, just make the directory and continue unarchiving...
-        if err := os.MkdirAll(path, 0755); err != nil {
+        if err := os.MkdirAll(path, mode(0755, umask)); err != nil {
             return err
         }
 
@@ -67,32 +66,21 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
         // required to contain entries for just the directories so this
         // can happen.
         if dir {
-            if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+            if err := os.MkdirAll(filepath.Dir(path), mode(0755, umask)); err != nil {
                 return err
             }
         }
 
         // Open the file for reading
         srcF, err := f.Open()
         if err != nil {
             return err
         }
 
-        // Open the file for writing
-        dstF, err := os.Create(path)
-        if err != nil {
-            srcF.Close()
-            return err
-        }
-        _, err = io.Copy(dstF, srcF)
-        srcF.Close()
-        dstF.Close()
-        if err != nil {
-            return err
-        }
-
-        // Chmod the file
-        if err := os.Chmod(path, f.Mode()); err != nil {
+        err = copyReader(path, srcF, f.Mode(), umask)
+        srcF.Close()
+        if err != nil {
             return err
         }
     }
@@ -35,7 +35,7 @@ func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
     var info struct {
         SCM string `json:"scm"`
     }
-    infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+    infoUrl := "https://api.bitbucket.org/2.0/repositories" + u.Path
     resp, err := http.Get(infoUrl)
     if err != nil {
         return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
@@ -3,6 +3,7 @@ package getter
 import (
     "context"
     "io"
+    "os"
 )
 
 // readerFunc is syntactic sugar for read interface.
@@ -27,3 +28,48 @@ func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
     }
     }))
 }
+
+// copyReader copies from an io.Reader into a file, using umask to create the dst file
+func copyReader(dst string, src io.Reader, fmode, umask os.FileMode) error {
+    dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fmode)
+    if err != nil {
+        return err
+    }
+    defer dstF.Close()
+
+    _, err = io.Copy(dstF, src)
+    if err != nil {
+        return err
+    }
+
+    // Explicitly chmod; the process umask is unconditionally applied otherwise.
+    // We'll mask the mode with our own umask, but that may be different than
+    // the process umask
+    return os.Chmod(dst, mode(fmode, umask))
+}
+
+// copyFile copies a file in chunks from src path to dst path, using umask to create the dst file
+func copyFile(ctx context.Context, dst, src string, fmode, umask os.FileMode) (int64, error) {
+    srcF, err := os.Open(src)
+    if err != nil {
+        return 0, err
+    }
+    defer srcF.Close()
+
+    dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fmode)
+    if err != nil {
+        return 0, err
+    }
+    defer dstF.Close()
+
+    count, err := Copy(ctx, dstF, srcF)
+    if err != nil {
+        return 0, err
+    }
+
+    // Explicitly chmod; the process umask is unconditionally applied otherwise.
+    // We'll mask the mode with our own umask, but that may be different than
+    // the process umask
+    err = os.Chmod(dst, mode(fmode, umask))
+    return count, err
+}
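The explicit Chmod after each copy matters because the mode handed to os.OpenFile is itself filtered by the process umask when the file is created; chmodding afterwards makes the final permissions depend only on the umask configured on the Client. A small standalone illustration of that behaviour (the file name is arbitrary, and the printed values assume a typical 022 process umask):

package main

import (
    "fmt"
    "os"
)

func main() {
    // Create a file asking for 0666. The kernel applies the process umask
    // (commonly 022), so the file typically ends up 0644 on disk.
    f, err := os.OpenFile("example.txt", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    if err != nil {
        panic(err)
    }
    f.Close()

    fi, err := os.Stat("example.txt")
    if err != nil {
        panic(err)
    }
    fmt.Printf("after create: %o\n", fi.Mode().Perm())

    // An explicit Chmod with the intended mode (masked only by the
    // configured umask, here zero) restores the requested permissions.
    if err := os.Chmod("example.txt", 0666); err != nil {
        panic(err)
    }
    fi, err = os.Stat("example.txt")
    if err != nil {
        panic(err)
    }
    fmt.Printf("after chmod:  %o\n", fi.Mode().Perm())

    os.Remove("example.txt")
}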
@@ -41,7 +41,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
     }
 
     // Create all the parent directories
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
@@ -56,13 +56,15 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
     }
 
     // The source path must exist and be a file to be usable.
-    if fi, err := os.Stat(path); err != nil {
+    var fi os.FileInfo
+    var err error
+    if fi, err = os.Stat(path); err != nil {
         return fmt.Errorf("source path error: %s", err)
     } else if fi.IsDir() {
         return fmt.Errorf("source path must be a file")
     }
 
-    _, err := os.Lstat(dst)
+    _, err = os.Lstat(dst)
     if err != nil && !os.IsNotExist(err) {
         return err
     }
@@ -76,7 +78,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
     }
 
     // Create all the parent directories
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err = os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
@@ -86,18 +88,6 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
     }
 
     // Copy
-    srcF, err := os.Open(path)
-    if err != nil {
-        return err
-    }
-    defer srcF.Close()
-
-    dstF, err := os.Create(dst)
-    if err != nil {
-        return err
-    }
-    defer dstF.Close()
-
-    _, err = Copy(ctx, dstF, srcF)
+    _, err = copyFile(ctx, dst, path, fi.Mode(), g.client.umask())
     return err
 }
@@ -45,7 +45,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
     }
 
     // Create all the parent directories
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
@@ -88,7 +88,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
     }
 
     // Create all the parent directories
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
@@ -138,11 +138,11 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error {
     }
 
     // Create all the parent directories if needed
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
-    f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
+    f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, g.client.mode(0666))
     if err != nil {
         return err
     }
@@ -253,11 +253,11 @@ func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string)
     }
 
     // Make the final destination
-    if err := os.MkdirAll(dst, 0755); err != nil {
+    if err := os.MkdirAll(dst, g.client.mode(0755)); err != nil {
         return err
     }
 
-    return copyDir(ctx, dst, sourcePath, false)
+    return copyDir(ctx, dst, sourcePath, false, g.client.umask())
 }
 
 // parseMeta looks for the first meta tag in the given reader that
@@ -84,7 +84,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error {
     }
 
     // Create all the parent directories
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
@@ -165,7 +165,7 @@ func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, ke
     }
 
     // Create all the parent directories
-    if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+    if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
         return err
     }
 
@@ -196,7 +196,7 @@
     {"path":"github.com/hashicorp/go-discover/provider/softlayer","checksumSHA1":"SIyZ44AHIUTBfI336ACpCeybsLg=","revision":"40ccfdee6c0d7136f98f2b54882b86aaf0250d2f","revisionTime":"2018-05-03T15:30:45Z","tree":true},
     {"path":"github.com/hashicorp/go-discover/provider/triton","checksumSHA1":"n2iQu2IbTPw2XpWF2CqBrFSMjwI=","revision":"40ccfdee6c0d7136f98f2b54882b86aaf0250d2f","revisionTime":"2018-05-03T15:30:45Z","tree":true},
     {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"},
-    {"path":"github.com/hashicorp/go-getter","checksumSHA1":"DPz/YgCQgHt4RkkJhgOHtj1w2JM=","revision":"9363991638334fdee4f649819138250a3b6402c8","revisionTime":"2019-02-18T16:50:04Z","version":"v1.1.0","versionExact":"v1.1.0"},
+    {"path":"github.com/hashicorp/go-getter","checksumSHA1":"rWTrxQbWjRp/T+hh309tN4KCQdg=","revision":"6be654f023a4019aa64ec8785503de635f51bf1f","revisionTime":"2019-08-15T20:19:34Z"},
     {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"},
     {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"dOP7kCX3dACHc9mU79826N411QA=","revision":"ff2cf002a8dd750586d91dddd4470c341f981fe1","revisionTime":"2018-07-09T16:53:50Z"},
     {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"},