Alibaba Object Storage support (#4783)

Nándor István Krácser 2018-08-13 23:03:24 +02:00 committed by Jeff Mitchell
parent 63e7ac034f
commit b9fab6375b
24 changed files with 6501 additions and 0 deletions

View File

@@ -51,6 +51,7 @@ import (
credToken "github.com/hashicorp/vault/builtin/credential/token"
credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
physAliCloudOSS "github.com/hashicorp/vault/physical/alicloudoss"
physAzure "github.com/hashicorp/vault/physical/azure"
physCassandra "github.com/hashicorp/vault/physical/cassandra"
physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb"
@@ -137,6 +138,7 @@ var (
}
physicalBackends = map[string]physical.Factory{
"alicloudoss": physAliCloudOSS.NewAliCloudOSSBackend,
"azure": physAzure.NewAzureBackend,
"cassandra": physCassandra.NewCassandraBackend,
"cockroachdb": physCockroachDB.NewCockroachDBBackend,

View File

@@ -0,0 +1,223 @@
package alicloudoss
import (
"bytes"
"context"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
log "github.com/hashicorp/go-hclog"
"github.com/armon/go-metrics"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/physical"
)
// Verify AliCloudOSSBackend satisfies the correct interfaces
var _ physical.Backend = (*AliCloudOSSBackend)(nil)
// AliCloudOSSBackend is a physical backend that stores data
// within an Alibaba OSS bucket.
type AliCloudOSSBackend struct {
bucket string
client *oss.Client
logger log.Logger
permitPool *physical.PermitPool
}
// NewAliCloudOSSBackend constructs an OSS backend using a pre-existing
// bucket. Credentials can be provided in the configuration map or
// sourced from the environment.
func NewAliCloudOSSBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
endpoint := os.Getenv("ALICLOUD_OSS_ENDPOINT")
if endpoint == "" {
endpoint = conf["endpoint"]
if endpoint == "" {
return nil, fmt.Errorf("'endpoint' must be set")
}
}
bucket := os.Getenv("ALICLOUD_OSS_BUCKET")
if bucket == "" {
bucket = conf["bucket"]
if bucket == "" {
return nil, fmt.Errorf("'bucket' must be set")
}
}
accessKeyID := os.Getenv("ALICLOUD_ACCESS_KEY")
if accessKeyID == "" {
accessKeyID = conf["access_key"]
if accessKeyID == "" {
return nil, fmt.Errorf("'access_key' must be set")
}
}
accessKeySecret := os.Getenv("ALICLOUD_SECRET_KEY")
if accessKeySecret == "" {
accessKeySecret = conf["secret_key"]
if accessKeySecret == "" {
return nil, fmt.Errorf("'secret_key' must be set")
}
}
// Config.Timeout is expressed in seconds in this SDK.
options := func(c *oss.Client) {
c.Config.Timeout = 30
}
client, err := oss.New(endpoint, accessKeyID, accessKeySecret, options)
if err != nil {
return nil, err
}
bucketObj, err := client.Bucket(bucket)
if err != nil {
return nil, err
}
_, err = bucketObj.ListObjects()
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("unable to access bucket %q at endpoint %q: {{err}}", bucket, endpoint), err)
}
maxParStr, ok := conf["max_parallel"]
var maxParInt int
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
}
}
a := &AliCloudOSSBackend{
client: client,
bucket: bucket,
logger: logger,
permitPool: physical.NewPermitPool(maxParInt),
}
return a, nil
}
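// newExampleBackend is a minimal usage sketch of the constructor above: it
// wires the backend up from a literal configuration map instead of the
// ALICLOUD_* environment variables. Every value below is a placeholder.
func newExampleBackend(logger log.Logger) (physical.Backend, error) {
	conf := map[string]string{
		"endpoint":     "oss-cn-hangzhou.aliyuncs.com", // placeholder region endpoint
		"bucket":       "my-vault-bucket",              // placeholder; the bucket must already exist
		"access_key":   "my-access-key-id",             // placeholder credential
		"secret_key":   "my-access-key-secret",         // placeholder credential
		"max_parallel": "128",                          // optional permit pool size
	}
	return NewAliCloudOSSBackend(conf, logger)
}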
// Put is used to insert or update an entry
func (a *AliCloudOSSBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"alibaba", "put"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
bucket, err := a.client.Bucket(a.bucket)
if err != nil {
return err
}
return bucket.PutObject(entry.Key, bytes.NewReader(entry.Value))
}
// Get is used to fetch an entry
func (a *AliCloudOSSBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"alibaba", "get"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
bucket, err := a.client.Bucket(a.bucket)
if err != nil {
return nil, err
}
object, err := bucket.GetObject(key)
if err != nil {
switch err := err.(type) {
case oss.ServiceError:
if err.StatusCode == 404 && err.Code == "NoSuchKey" {
return nil, nil
}
}
return nil, err
}
data := bytes.NewBuffer(nil)
_, err = io.Copy(data, object)
if err != nil {
return nil, err
}
ent := &physical.Entry{
Key: key,
Value: data.Bytes(),
}
return ent, nil
}
// Delete is used to permanently delete an entry
func (a *AliCloudOSSBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"alibaba", "delete"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
bucket, err := a.client.Bucket(a.bucket)
if err != nil {
return err
}
return bucket.DeleteObject(key)
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (a *AliCloudOSSBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"alibaba", "list"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
keys := []string{}
bucket, err := a.client.Bucket(a.bucket)
if err != nil {
return nil, err
}
marker := oss.Marker("")
for {
result, err := bucket.ListObjects(oss.Prefix(prefix), oss.Delimiter("/"), marker)
if err != nil {
return nil, err
}
for _, commonPrefix := range result.CommonPrefixes {
commonPrefix := strings.TrimPrefix(commonPrefix, prefix)
keys = append(keys, commonPrefix)
}
for _, object := range result.Objects {
// Add objects only from the current 'folder'
key := strings.TrimPrefix(object.Key, prefix)
keys = append(keys, key)
}
if !result.IsTruncated {
break
}
marker = oss.Marker(result.NextMarker)
}
sort.Strings(keys)
return keys, nil
}
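// listExample is a minimal sketch of the List semantics above: with entries
// "foo/bar" and "foo/baz/zip" stored, listing the placeholder prefix "foo/"
// yields the key "bar" plus the sub-prefix "baz/", trimmed and sorted as in List.
func listExample(ctx context.Context, a *AliCloudOSSBackend) ([]string, error) {
	// Expected result under the assumption above: []string{"bar", "baz/"}.
	return a.List(ctx, "foo/")
}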

View File

@@ -0,0 +1,71 @@
package alicloudoss
import (
"fmt"
"math/rand"
"os"
"testing"
"time"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/physical"
)
func TestAliCloudOSSBackend(t *testing.T) {
endpoint := os.Getenv("ALICLOUD_OSS_ENDPOINT")
accessKeyID := os.Getenv("ALICLOUD_ACCESS_KEY")
accessKeySecret := os.Getenv("ALICLOUD_SECRET_KEY")
if endpoint == "" || accessKeyID == "" || accessKeySecret == "" {
t.SkipNow()
}
conn, err := oss.New(endpoint, accessKeyID, accessKeySecret)
if err != nil {
t.Fatalf("unable to create test client: %s", err)
}
var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
bucket := fmt.Sprintf("vault-alibaba-testacc-%d", randInt)
err = conn.CreateBucket(bucket)
if err != nil {
t.Fatalf("unable to create test bucket: %s", err)
}
defer func() {
// We have to list and delete all the objects
// before the bucket itself can be deleted
b, err := conn.Bucket(bucket)
if err != nil {
t.Fatalf("err: %s", err)
}
listResp, err := b.ListObjects()
if err != nil {
t.Fatalf("err: %s", err)
}
objects := []string{}
for _, object := range listResp.Objects {
objects = append(objects, object.Key)
}
b.DeleteObjects(objects)
err = conn.DeleteBucket(bucket)
if err != nil {
t.Fatalf("err: %s", err)
}
}()
logger := logging.NewVaultLogger(log.Debug)
// The backend resolves the Alibaba credentials from the same environment variables we checked at the beginning of the test
b, err := NewAliCloudOSSBackend(map[string]string{
"bucket": bucket,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}

vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go generated vendored Normal file
View File

@@ -0,0 +1,97 @@
package oss
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"hash"
"io"
"net/http"
"sort"
"strings"
)
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
type headerSorter struct {
Keys []string
Vals []string
}
// signHeader signs the header and sets it as the authorization header.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
// Get the final authorization string
authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
// Set the Authorization header
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
// Collect the "x-oss-"-prefixed headers from the request
temp := make(map[string]string)
for k, v := range req.Header {
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
temp[strings.ToLower(k)] = v[0]
}
}
hs := newHeaderSorter(temp)
// Sort the collected headers in ascending key order
hs.Sort()
// Get the canonicalizedOSSHeaders
canonicalizedOSSHeaders := ""
for i := range hs.Keys {
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
}
// Gather the remaining signature inputs.
// When signing a URL, the Date field carries the expiration time.
date := req.Header.Get(HTTPHeaderDate)
contentType := req.Header.Get(HTTPHeaderContentType)
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
}
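// signExample is a minimal sketch of the computation getSignedStr performs:
// base64(HMAC-SHA1(secret, stringToSign)), where stringToSign concatenates the
// verb, Content-MD5, Content-Type, date, canonicalized x-oss- headers and the
// canonicalized resource, separated by newlines. The secret and resource below
// are placeholders.
func signExample() string {
	stringToSign := "PUT\n\napplication/octet-stream\nMon, 13 Aug 2018 21:03:24 GMT\n/my-bucket/my-object"
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte("my-access-key-secret"))
	io.WriteString(h, stringToSign)
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}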
// newHeaderSorter is a helper for signHeader.
func newHeaderSorter(m map[string]string) *headerSorter {
hs := &headerSorter{
Keys: make([]string, 0, len(m)),
Vals: make([]string, 0, len(m)),
}
for k, v := range m {
hs.Keys = append(hs.Keys, k)
hs.Vals = append(hs.Vals, v)
}
return hs
}
// Sort sorts the collected headers; headerSorter implements sort.Interface.
func (hs *headerSorter) Sort() {
sort.Sort(hs)
}
// Len is part of sort.Interface.
func (hs *headerSorter) Len() int {
return len(hs.Vals)
}
// Less is part of sort.Interface.
func (hs *headerSorter) Less(i, j int) bool {
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
}
// Swap is part of sort.Interface.
func (hs *headerSorter) Swap(i, j int) {
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}

View File

@@ -0,0 +1,933 @@
package oss
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"time"
)
// Bucket implements the operations of object.
type Bucket struct {
Client Client
BucketName string
}
// PutObject creates a new object; it overwrites the original one if it already exists.
//
// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\".
// reader io.Reader instance for reading the data for uploading
// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding
// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
opts := addContentType(options, objectKey)
request := &PutObjectRequest{
ObjectKey: objectKey,
Reader: reader,
}
resp, err := bucket.DoPutObject(request, opts)
if err != nil {
return err
}
defer resp.Body.Close()
return err
}
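// putObjectExample is a minimal sketch of an upload: the body can be any
// io.Reader, here an in-memory buffer; the object key is a placeholder.
func putObjectExample(bucket Bucket) error {
	return bucket.PutObject("examples/hello.txt", bytes.NewReader([]byte("hello OSS")))
}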
// PutObjectFromFile creates a new object from the local file.
//
// objectKey object key.
// filePath the local file path to upload.
// options the options for uploading the object. Refer to the parameter options in PutObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
fd, err := os.Open(filePath)
if err != nil {
return err
}
defer fd.Close()
opts := addContentType(options, filePath, objectKey)
request := &PutObjectRequest{
ObjectKey: objectKey,
Reader: fd,
}
resp, err := bucket.DoPutObject(request, opts)
if err != nil {
return err
}
defer resp.Body.Close()
return err
}
// DoPutObject does the actual upload work.
//
// request the request instance for uploading an object.
// options the options for uploading an object.
//
// Response the response from OSS.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
if !isOptSet {
options = addContentType(options, request.ObjectKey)
}
listener := getProgressListener(options)
params := map[string]interface{}{}
resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener)
if err != nil {
return nil, err
}
if bucket.getConfig().IsEnableCRC {
err = checkCRC(resp, "DoPutObject")
if err != nil {
return resp, err
}
}
err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
return resp, err
}
// GetObject downloads the object.
//
// objectKey the object key.
// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch,
// IfNoneMatch, AcceptEncoding. For more details, please check out:
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
//
// io.ReadCloser reader instance for reading data from the response. Close() must be called after usage, and it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
if err != nil {
return nil, err
}
return result.Response.Body, nil
}
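// getObjectExample is a minimal sketch of a download: the caller owns the
// returned ReadCloser and must drain and close it. The key is a placeholder.
func getObjectExample(bucket Bucket) ([]byte, error) {
	body, err := bucket.GetObject("examples/hello.txt")
	if err != nil {
		return nil, err
	}
	defer body.Close()
	return ioutil.ReadAll(body)
}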
// GetObjectToFile downloads the data to a local file.
//
// objectKey the object key to download.
// filePath the local file to store the object data.
// options the options for downloading the object. Refer to the parameter options in method GetObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
tempFilePath := filePath + TempFileSuffix
// Calls the API to actually download the object. Returns the result instance.
result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
if err != nil {
return err
}
defer result.Response.Body.Close()
// If the local file does not exist, create a new one. If it exists, overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
if err != nil {
return err
}
// Copy the data to the local file path.
_, err = io.Copy(fd, result.Response.Body)
fd.Close()
if err != nil {
return err
}
// Compares the CRC value
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
result.Response.ClientCRC = result.ClientCRC.Sum64()
err = checkCRC(result.Response, "GetObjectToFile")
if err != nil {
os.Remove(tempFilePath)
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs.
//
// request the request to download the object.
// options the options for downloading the file. Check out the parameter options in method GetObject.
//
// GetObjectResult the result instance of getting the object.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
params := map[string]interface{}{}
resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
if err != nil {
return nil, err
}
result := &GetObjectResult{
Response: resp,
}
// CRC
var crcCalc hash.Hash64
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
crcCalc = crc64.New(crcTable())
result.ServerCRC = resp.ServerCRC
result.ClientCRC = crcCalc
}
// Progress
listener := getProgressListener(options)
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
return result, nil
}
// CopyObject copies the object inside the bucket.
//
// srcObjectKey the source object to copy.
// destObjectKey the target object to copy.
// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch,
// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details :
// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
var out CopyObjectResult
options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
params := map[string]interface{}{}
resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// CopyObjectTo copies the object to another bucket.
//
// srcObjectKey source object key. The source bucket is Bucket.BucketName .
// destBucketName target bucket name.
// destObjectKey target object name.
// options copy options, check out parameter options in function CopyObject for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
}
// CopyObjectFrom copies an object from the source bucket to the current bucket.
//
// srcBucketName source bucket name.
// srcObjectKey source object name.
// destObjectKey target object name. The target bucket name is Bucket.BucketName.
// options copy options. Check out parameter options in function CopyObject.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
destBucketName := bucket.BucketName
var out CopyObjectResult
srcBucket, err := bucket.Client.Bucket(srcBucketName)
if err != nil {
return out, err
}
return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
}
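// copyDirectionExample is a minimal sketch contrasting the two cross-bucket
// helpers on a bucket b: CopyObjectTo pushes b's object into another bucket,
// while CopyObjectFrom pulls an object from another bucket into b. All bucket
// and key names are placeholders.
func copyDirectionExample(b Bucket) error {
	if _, err := b.CopyObjectTo("other-bucket", "dst-key", "src-key"); err != nil {
		return err
	}
	_, err := b.CopyObjectFrom("other-bucket", "src-key", "dst-key")
	return err
}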
func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) {
var out CopyObjectResult
options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return out, err
}
params := map[string]interface{}{}
resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// AppendObject uploads the data in the way of appending an existing or new object.
//
// AppendObject the parameter appendPosition specifies which position (in the target object) to append to. For the first append (to a non-existing file),
// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length.
// For example, the first appendObject's appendPosition is 0 and it uploads 65536 bytes of data; then the second call's position is 65536.
// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller need not maintain this information).
//
// objectKey the target object to append to.
// reader io.Reader. The read instance for reading the data to append.
// appendPosition the start position to append.
// destObjectProperties the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding,
// Expires, ServerSideEncryption, ObjectACL.
//
// int64 the next append position, it's valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
request := &AppendObjectRequest{
ObjectKey: objectKey,
Reader: reader,
Position: appendPosition,
}
result, err := bucket.DoAppendObject(request, options)
if err != nil {
return appendPosition, err
}
return result.NextPosition, err
}
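// appendExample is a minimal sketch of the append-position protocol described
// above: the first append to a fresh key starts at position 0, and each call
// returns the position for the next one. The key and payloads are placeholders.
func appendExample(bucket Bucket) error {
	next, err := bucket.AppendObject("examples/log.txt", bytes.NewReader([]byte("first")), 0)
	if err != nil {
		return err
	}
	_, err = bucket.AppendObject("examples/log.txt", bytes.NewReader([]byte("second")), next)
	return err
}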
// DoAppendObject is the actual API that does the object append.
//
// request the request object for appending object.
// options the options for appending object.
//
// AppendObjectResult the result object for appending object.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
params := map[string]interface{}{}
params["append"] = nil
params["position"] = strconv.FormatInt(request.Position, 10)
headers := make(map[string]string)
opts := addContentType(options, request.ObjectKey)
handleOptions(headers, opts)
var initCRC uint64
isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64)
if isCRCSet {
initCRC = initCRCOpt.(uint64)
}
listener := getProgressListener(options)
handleOptions(headers, opts)
resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
request.Reader, initCRC, listener)
if err != nil {
return nil, err
}
defer resp.Body.Close()
nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64)
result := &AppendObjectResult{
NextPosition: nextPosition,
CRC: resp.ServerCRC,
}
if bucket.getConfig().IsEnableCRC && isCRCSet {
err = checkCRC(resp, "AppendObject")
if err != nil {
return result, err
}
}
return result, nil
}
// DeleteObject deletes the object.
//
// objectKey the object key to delete.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DeleteObject(objectKey string) error {
params := map[string]interface{}{}
resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// DeleteObjects deletes multiple objects.
//
// objectKeys the object keys to delete.
// options the options for deleting objects.
// The supported option is DeleteObjectsQuiet, which means no error is returned even if a deletion fails (not recommended). By default it's not used.
//
// DeleteObjectsResult the result object.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
out := DeleteObjectsResult{}
dxml := deleteXML{}
for _, key := range objectKeys {
dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
}
isQuiet, _ := findOption(options, deleteObjectsQuiet, false)
dxml.Quiet = isQuiet.(bool)
bs, err := xml.Marshal(dxml)
if err != nil {
return out, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
options = append(options, ContentType(contentType))
sum := md5.Sum(bs)
b64 := base64.StdEncoding.EncodeToString(sum[:])
options = append(options, ContentMD5(b64))
params := map[string]interface{}{}
params["delete"] = nil
params["encoding-type"] = "url"
resp, err := bucket.do("POST", "", params, options, buffer, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
if !dxml.Quiet {
if err = xmlUnmarshal(resp.Body, &out); err == nil {
err = decodeDeleteObjectsResult(&out)
}
}
return out, err
}
// IsObjectExist checks if the object exists.
//
// bool flag of the object's existence (true: exists; false: does not exist) when error is nil.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
_, err := bucket.GetObjectMeta(objectKey)
if err == nil {
return true, nil
}
switch err.(type) {
case ServiceError:
if err.(ServiceError).StatusCode == 404 && err.(ServiceError).Code == "NoSuchKey" {
return false, nil
}
}
return false, err
}
// ListObjects lists the objects under the current bucket.
//
// options it contains all the filters for listing objects.
// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names.
// The key marker means the returned objects' keys must be greater than it in lexicographic order.
//
// For example, if the bucket has 8 objects: my-object-1, my-object-11, my-object-2, my-object-21,
// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
// the three objects my-object-2, my-object-21 and my-object-22. If the marker is my-object-22 (no other filters), then it returns
// the three objects my-object-3, my-object-31 and my-object-32. If the max keys is 5, then it returns 5 objects.
// The three filters could be used together to achieve filter and paging functionality.
// If the prefix is a folder name, then it lists all files under this folder (including the files under its subfolders).
// But if the delimiter is specified as '/', then it only returns that folder's files (no subfolders' files). The direct subfolders are in the CommonPrefixes property.
// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi and fun/movie/007.avi, and the prefix is "fun/", then it returns all three objects.
// But if the delimiter is additionally '/', then only "fun/test.jpg" is returned as a file and fun/movie/ is returned as a common prefix.
//
// For common usage scenario, check out sample/list_object.go.
//
// ListObjectsResponse the return value after operation succeeds (only valid when error is nil).
//
func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
var out ListObjectsResult
options = append(options, EncodingType("url"))
params, err := getRawParams(options)
if err != nil {
return out, err
}
resp, err := bucket.do("GET", "", params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return out, err
}
err = decodeListObjectsResult(&out)
return out, err
}
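// listAllExample is a minimal sketch of paging through a listing with the
// filters described above: objects directly under the placeholder prefix
// "fun/" are returned in Objects, deeper "folders" appear in CommonPrefixes,
// and NextMarker drives the next page while IsTruncated is true.
func listAllExample(bucket Bucket) ([]string, error) {
	var keys []string
	marker := ""
	for {
		res, err := bucket.ListObjects(Prefix("fun/"), Delimiter("/"), Marker(marker))
		if err != nil {
			return nil, err
		}
		for _, object := range res.Objects {
			keys = append(keys, object.Key)
		}
		keys = append(keys, res.CommonPrefixes...)
		if !res.IsTruncated {
			return keys, nil
		}
		marker = res.NextMarker
	}
}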
// SetObjectMeta sets the metadata of the Object.
//
// objectKey object
// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, and custom metadata.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
options = append(options, MetadataDirective(MetaReplace))
_, err := bucket.CopyObject(objectKey, objectKey, options...)
return err
}
// GetObjectDetailedMeta gets the object's detailed metadata
//
// objectKey object key.
// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince,
// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
//
// http.Header object meta when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
params := map[string]interface{}{}
resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return resp.Headers, nil
}
// GetObjectMeta gets object metadata.
//
// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag,
// size and LastModified. The size information is in the HTTP header Content-Length.
//
// objectKey object key
//
// http.Header the object's metadata, valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
params := map[string]interface{}{}
params["objectMeta"] = nil
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return resp.Headers, nil
}
// SetObjectACL updates the object's ACL.
//
// Only the bucket's owner can update an object's ACL, and the object's ACL takes priority over the bucket's ACL.
// For example, if the bucket ACL is private and the object's ACL is public-read-write,
// then the object's ACL is used, which means all users can read or write that object.
// When the object's ACL is not set, the bucket's ACL is used as the object's ACL.
//
// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
// CompleteMultipartUpload and CopyObject on target object.
//
// objectKey the target object key (to set the ACL on)
// objectAcl object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
options := []Option{ObjectACL(objectACL)}
params := map[string]interface{}{}
params["acl"] = nil
resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetObjectACL gets object's ACL
//
// objectKey the object to get ACL from.
//
// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
var out GetObjectACLResult
params := map[string]interface{}{}
params["acl"] = nil
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// PutSymlink creates a symlink (to point to an existing object)
//
// Symlink cannot point to another symlink.
// When creating a symlink, it does not check the existence of the target file, nor whether the target file is itself a symlink.
// Nor does it check the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
// If the symlink key already exists, it will be overwritten as long as the caller has write permission.
// If the x-oss-meta- is specified, it will be added as the metadata of the symlink file.
//
// symObjectKey the symlink object's key.
// targetObjectKey the target object key to point to.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
params := map[string]interface{}{}
params["symlink"] = nil
resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetSymlink gets the symlink object with the specified key.
// If the symlink object does not exist, a 404 error is returned.
//
// objectKey the symlink object's key.
//
// error it's nil if no error, otherwise it's an error object.
// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
//
func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
params := map[string]interface{}{}
params["symlink"] = nil
resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
targetObjectKey, err = url.QueryUnescape(targetObjectKey)
if err != nil {
return resp.Headers, err
}
resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
return resp.Headers, err
}
// RestoreObject restores the object from the archive storage.
//
// An archive object is in cold status by default and it cannot be accessed.
// When restore is called on the cold object, it will become available for access after some time.
// If multiple restores are called on the same file while the object is being restored, the server does nothing for the additional calls but returns success.
// By default, the restored object is available for access for one day. After that it will be unavailable again.
// But if another RestoreObject is called after the file is restored, it extends the object's access time by one day, up to 7 days.
//
// objectKey object key to restore.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) RestoreObject(objectKey string) error {
params := map[string]interface{}{}
params["restore"] = nil
resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
}
// SignURL signs the URL. Users could access the object directly with this URL without getting the AK.
//
// objectKey the target object to sign.
// signURLConfig the configuration for the signed URL
//
// string returns the signed URL, when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
if expiredInSec < 0 {
return "", fmt.Errorf("invalid expires: %d, expires must bigger than 0", expiredInSec)
}
expiration := time.Now().Unix() + expiredInSec
params, err := getRawParams(options)
if err != nil {
return "", err
}
headers := make(map[string]string)
err = handleOptions(headers, options)
if err != nil {
return "", err
}
return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
}
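// signURLExample is a minimal sketch of pre-signing a download: the returned
// URL embeds the signature and expiry, so its holder can fetch the object
// without credentials. HTTPGet is assumed to be the SDK's GET method constant;
// the key and the 60-second lifetime are placeholders.
func signURLExample(bucket Bucket) (io.ReadCloser, error) {
	signedURL, err := bucket.SignURL("examples/hello.txt", HTTPGet, 60)
	if err != nil {
		return nil, err
	}
	return bucket.GetObjectWithURL(signedURL)
}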
// PutObjectWithURL uploads an object with a signed URL. If the object exists, it will be overwritten.
// It will not generate the mimetype according to the key name.
//
// signedURL signed URL.
// reader io.Reader the read instance for reading the data for the upload.
// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
if err != nil {
return err
}
defer resp.Body.Close()
return err
}
// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
// It does not generate the mimetype according to the object key's name or the local file name.
//
// signedURL the signed URL.
// filePath local file path, such as dirfile.txt, for uploading.
// options options for uploading, same as the options in PutObject function.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
fd, err := os.Open(filePath)
if err != nil {
return err
}
defer fd.Close()
resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
if err != nil {
return err
}
defer resp.Body.Close()
return err
}
// DoPutObjectWithURL is the actual API that does the upload with a signed URL (internal to the SDK).
//
// signedURL the signed URL.
// reader io.Reader the read instance for getting the data to upload.
// options options for uploading.
//
// Response the response object which contains the HTTP response.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
listener := getProgressListener(options)
params := map[string]interface{}{}
resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
if err != nil {
return nil, err
}
if bucket.getConfig().IsEnableCRC {
err = checkCRC(resp, "DoPutObjectWithURL")
if err != nil {
return resp, err
}
}
err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
return resp, err
}
// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL.
//
// signedURL the signed URL.
// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
//
// io.ReadCloser the reader object for getting the data from the response. It needs to be closed after usage. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
result, err := bucket.DoGetObjectWithURL(signedURL, options)
if err != nil {
return nil, err
}
return result.Response.Body, nil
}
// GetObjectToFileWithURL downloads the object into a local file with the signed URL.
//
// signedURL the signed URL
// filePath the local file path to download to.
// options the options for downloading object. Check out the parameter options in function GetObject for the reference.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
tempFilePath := filePath + TempFileSuffix
// Get the object's content
result, err := bucket.DoGetObjectWithURL(signedURL, options)
if err != nil {
return err
}
defer result.Response.Body.Close()
// If the file does not exist, create one. If it exists, overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
if err != nil {
return err
}
// Save the data to the file.
_, err = io.Copy(fd, result.Response.Body)
fd.Close()
if err != nil {
return err
}
// Compare the CRC value. If CRC values do not match, return error.
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
result.Response.ClientCRC = result.ClientCRC.Sum64()
err = checkCRC(result.Response, "GetObjectToFileWithURL")
if err != nil {
os.Remove(tempFilePath)
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// DoGetObjectWithURL is the actual API that downloads the file with the signed URL.
//
// signedURL the signed URL.
// options the options for getting object. Check out parameter options in GetObject for the reference.
//
// GetObjectResult the result object when the error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
params := map[string]interface{}{}
resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
if err != nil {
return nil, err
}
result := &GetObjectResult{
Response: resp,
}
// CRC
var crcCalc hash.Hash64
hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
if bucket.getConfig().IsEnableCRC && !hasRange {
crcCalc = crc64.New(crcTable())
result.ServerCRC = resp.ServerCRC
result.ClientCRC = crcCalc
}
// Progress
listener := getProgressListener(options)
contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
return result, nil
}
// Private
func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
data io.Reader, listener ProgressListener) (*Response, error) {
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return nil, err
}
return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
params, headers, data, 0, listener)
}
func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
data io.Reader, listener ProgressListener) (*Response, error) {
headers := make(map[string]string)
err := handleOptions(headers, options)
if err != nil {
return nil, err
}
return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
}
func (bucket Bucket) getConfig() *Config {
return bucket.Client.Config
}
func addContentType(options []Option, keys ...string) []Option {
typ := TypeByExtension("")
for _, key := range keys {
typ = TypeByExtension(key)
if typ != "" {
break
}
}
if typ == "" {
typ = "application/octet-stream"
}
opts := []Option{ContentType(typ)}
opts = append(opts, options...)
return opts
}

View File

@@ -0,0 +1,765 @@
// Package oss implements functions for accessing the OSS service.
// It has two main structs: Client and Bucket.
package oss
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"strings"
"time"
)
// Client is the SDK's entry point. It handles bucket-level operations such as creating/deleting a bucket
// and setting/getting bucket properties (ACL/lifecycle/referer/logging/website).
// Object-related operations are done via the Bucket type.
// Use oss.New to create a Client instance.
//
type (
// Client OSS client
Client struct {
Config *Config // OSS client configuration
Conn *Conn // Send HTTP request
}
// ClientOption client option such as UseCname, Timeout, SecurityToken.
ClientOption func(*Client)
)
// New creates a new client.
//
// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com .
// accessKeyId access key Id.
// accessKeySecret access key secret.
//
// Client creates the new client instance, the returned value is valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
// Configuration
config := getDefaultOssConfig()
config.Endpoint = endpoint
config.AccessKeyID = accessKeyID
config.AccessKeySecret = accessKeySecret
// URL parse
url := &urlMaker{}
url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
// HTTP connect
conn := &Conn{config: config, url: url}
// OSS client
client := &Client{
config,
conn,
}
// Client options parse
for _, option := range options {
option(client)
}
// Create HTTP connection
err := conn.init(config, url)
return client, err
}
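// newClientExample is a minimal sketch of the ClientOption pattern used by
// New: options are plain functions that mutate the Client before the
// connection is initialized, here shortening the configured timeout (in
// seconds). The endpoint and credentials are placeholders.
func newClientExample() (*Client, error) {
	withShortTimeout := func(c *Client) {
		c.Config.Timeout = 30 // seconds
	}
	return New("oss-cn-hangzhou.aliyuncs.com", "my-access-key-id", "my-access-key-secret", withShortTimeout)
}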
// Bucket gets the bucket instance.
//
// bucketName the bucket name.
// Bucket the bucket object, when error is nil.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) Bucket(bucketName string) (*Bucket, error) {
return &Bucket{
client,
bucketName,
}, nil
}
// CreateBucket creates a bucket.
//
// bucketName the bucket name, it's globally unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-').
// It must start with lowercase letter or number and the length can only be between 3 and 255.
// options options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate.
// It could also be specified with StorageClass option, which supports StorageStandard, StorageIA(infrequent access), StorageArchive.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) CreateBucket(bucketName string, options ...Option) error {
headers := make(map[string]string)
handleOptions(headers, options)
buffer := new(bytes.Buffer)
isOptSet, val, _ := isOptionSet(options, storageClass)
if isOptSet {
cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
bs, err := xml.Marshal(cbConfig)
if err != nil {
return err
}
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers[HTTPHeaderContentType] = contentType
}
params := map[string]interface{}{}
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
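// createBucketExample is a minimal sketch of creating a bucket with a
// non-default ACL and storage class. The ACL and StorageClass option helpers
// are assumed from the option names documented above; the bucket name is a
// placeholder.
func createBucketExample(client Client) error {
	return client.CreateBucket("my-example-bucket", ACL(ACLPublicRead), StorageClass(StorageIA))
}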
// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
//
// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
// And Marker makes sure the returned buckets' names are greater than it in lexicographic order.
// MaxKeys limits the number of buckets to return; by default it's 100 and it can be up to 1000.
// For the common usage scenario, please check out list_bucket.go in the sample.
// ListBucketsResponse the response object if error is nil.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
var out ListBucketsResult
params, err := getRawParams(options)
if err != nil {
return out, err
}
resp, err := client.do("GET", "", params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// IsBucketExist checks if the bucket exists
//
// bucketName the bucket name.
//
// bool true if it exists, and it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) IsBucketExist(bucketName string) (bool, error) {
listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
if err != nil {
return false, err
}
if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName {
return true, nil
}
return false, nil
}
// DeleteBucket deletes the bucket. Only empty bucket can be deleted (no object and parts).
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucket(bucketName string) error {
params := map[string]interface{}{}
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLocation gets the bucket location.
//
// Check out the following link for more information:
// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
//
// bucketName the bucket name
//
// string bucket's datacenter location
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketLocation(bucketName string) (string, error) {
params := map[string]interface{}{}
params["location"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return "", err
}
defer resp.Body.Close()
var LocationConstraint string
err = xmlUnmarshal(resp.Body, &LocationConstraint)
return LocationConstraint, err
}
// SetBucketACL sets bucket's ACL.
//
// bucketName the bucket name
// bucketAcl the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
params := map[string]interface{}{}
resp, err := client.do("PUT", bucketName, params, headers, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetBucketACL gets the bucket ACL.
//
// bucketName the bucket name.
//
// GetBucketAclResponse the result object, and it's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
var out GetBucketACLResult
params := map[string]interface{}{}
params["acl"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// SetBucketLifecycle sets the bucket's lifecycle.
//
// For more information, check out the following link:
// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
//
// bucketName the bucket name.
// rules the lifecycle rules. There are two kinds of rules: absolute time expiration (by a day/month/year date) and relative time expiration (in days).
// Check out sample/bucket_lifecycle.go for more details.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
bs, err := xml.Marshal(lxml)
if err != nil {
return err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
params["lifecycle"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// DeleteBucketLifecycle deletes the bucket's lifecycle.
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketLifecycle(bucketName string) error {
params := map[string]interface{}{}
params["lifecycle"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLifecycle gets the bucket's lifecycle settings.
//
// bucketName the bucket name.
//
// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
var out GetBucketLifecycleResult
params := map[string]interface{}{}
params["lifecycle"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer.
//
// To prevent hotlinking of OSS data, OSS supports the HTTP Referer header. A referrer whitelist could be set either by API or web console, as well as
// the flag allowing empty referrers. Note that this applies to requests from web browsers only.
// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket.
// For more information, please check out this link :
// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
//
// bucketName the bucket name.
// referers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards.
// The sample could be found in sample/bucket_referer.go
// allowEmptyReferer the flag of allowing empty referrer. By default it's true.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
rxml := RefererXML{}
rxml.AllowEmptyReferer = allowEmptyReferer
if referers == nil {
rxml.RefererList = append(rxml.RefererList, "")
} else {
for _, referer := range referers {
rxml.RefererList = append(rxml.RefererList, referer)
}
}
bs, err := xml.Marshal(rxml)
if err != nil {
return err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
params["referer"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetBucketReferer gets the bucket's referrer white list.
//
// bucketName the bucket name.
//
// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
var out GetBucketRefererResult
params := map[string]interface{}{}
params["referer"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// SetBucketLogging sets the bucket logging settings.
//
// OSS can automatically store access logs. Only the bucket owner can enable logging.
// Once enabled, OSS saves all the access logs into hourly log files in a specified bucket.
// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
//
// bucketName bucket name to enable the log.
// targetBucket the target bucket name to store the log files.
// targetPrefix the log files' prefix.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
isEnable bool) error {
var err error
var bs []byte
if isEnable {
lxml := LoggingXML{}
lxml.LoggingEnabled.TargetBucket = targetBucket
lxml.LoggingEnabled.TargetPrefix = targetPrefix
bs, err = xml.Marshal(lxml)
} else {
lxml := loggingXMLEmpty{}
bs, err = xml.Marshal(lxml)
}
if err != nil {
return err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
params["logging"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket.
//
// bucketName the bucket name to disable the logging.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketLogging(bucketName string) error {
params := map[string]interface{}{}
params["logging"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLogging gets the bucket's logging settings
//
// bucketName the bucket name
// GetBucketLoggingResponse the result object upon successful request. It's only valid when error is nil.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
var out GetBucketLoggingResult
params := map[string]interface{}{}
params["logging"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// SetBucketWebsite sets the bucket's static website's index and error page.
//
// OSS supports static website hosting for bucket data. When it is enabled on a bucket, the files in the bucket can be accessed the same way as a static website.
// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
//
// bucketName the bucket name to enable static web site.
// indexDocument index page.
// errorDocument error page.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
wxml := WebsiteXML{}
wxml.IndexDocument.Suffix = indexDocument
wxml.ErrorDocument.Key = errorDocument
bs, err := xml.Marshal(wxml)
if err != nil {
return err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers := make(map[string]string)
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
params["website"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
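// Usage sketch (illustrative; the bucket and page names are hypothetical):
// serve "index.html" as the default page and "error.html" for missing keys.
//
//	err := client.SetBucketWebsite("mybucket", "index.html", "error.html")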
// DeleteBucketWebsite deletes the bucket's static web site settings.
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketWebsite(bucketName string) error {
params := map[string]interface{}{}
params["website"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketWebsite gets the bucket's default page (index page) and the error page.
//
// bucketName the bucket name
//
// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
var out GetBucketWebsiteResult
params := map[string]interface{}{}
params["website"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// SetBucketCORS sets the bucket's CORS rules
//
// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
//
// bucketName the bucket name
// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
corsxml := CORSXML{}
for _, v := range corsRules {
cr := CORSRule{}
cr.AllowedMethod = v.AllowedMethod
cr.AllowedOrigin = v.AllowedOrigin
cr.AllowedHeader = v.AllowedHeader
cr.ExposeHeader = v.ExposeHeader
cr.MaxAgeSeconds = v.MaxAgeSeconds
corsxml.CORSRules = append(corsxml.CORSRules, cr)
}
bs, err := xml.Marshal(corsxml)
if err != nil {
return err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
contentType := http.DetectContentType(buffer.Bytes())
headers := map[string]string{}
headers[HTTPHeaderContentType] = contentType
params := map[string]interface{}{}
params["cors"] = nil
resp, err := client.do("PUT", bucketName, params, headers, buffer)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// DeleteBucketCORS deletes the bucket's CORS settings.
//
// bucketName the bucket name.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) DeleteBucketCORS(bucketName string) error {
params := map[string]interface{}{}
params["cors"] = nil
resp, err := client.do("DELETE", bucketName, params, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// GetBucketCORS gets the bucket's CORS settings.
//
// bucketName the bucket name.
// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
var out GetBucketCORSResult
params := map[string]interface{}{}
params["cors"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// GetBucketInfo gets the bucket information.
//
// bucketName the bucket name.
// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil.
//
// error it's nil if no error, otherwise it's an error object.
//
func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
var out GetBucketInfoResult
params := map[string]interface{}{}
params["bucketInfo"] = nil
resp, err := client.do("GET", bucketName, params, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// UseCname sets the flag for using CName. By default it's false.
//
// isUseCname true: the endpoint is a CName; false: the endpoint is not a CName. Default is false.
//
func UseCname(isUseCname bool) ClientOption {
return func(client *Client) {
client.Config.IsCname = isUseCname
client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
}
}
// Timeout sets the HTTP timeout in seconds.
//
// connectTimeoutSec HTTP connect timeout in seconds. Default is 30 seconds. 0 means infinite (not recommended).
// readWriteTimeout HTTP read or write timeout in seconds. Default is 60 seconds. 0 means infinite.
//
func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
return func(client *Client) {
client.Config.HTTPTimeout.ConnectTimeout =
time.Second * time.Duration(connectTimeoutSec)
client.Config.HTTPTimeout.ReadWriteTimeout =
time.Second * time.Duration(readWriteTimeout)
client.Config.HTTPTimeout.HeaderTimeout =
time.Second * time.Duration(readWriteTimeout)
client.Config.HTTPTimeout.IdleConnTimeout =
time.Second * time.Duration(readWriteTimeout)
client.Config.HTTPTimeout.LongTimeout =
time.Second * time.Duration(readWriteTimeout*10)
}
}
// SecurityToken sets the temporary user's SecurityToken.
//
// token STS token
//
func SecurityToken(token string) ClientOption {
return func(client *Client) {
client.Config.SecurityToken = strings.TrimSpace(token)
}
}
// EnableMD5 enables MD5 validation.
//
// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation.
//
func EnableMD5(isEnableMD5 bool) ClientOption {
return func(client *Client) {
client.Config.IsEnableMD5 = isEnableMD5
}
}
// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5. The default is 16MB.
//
// threshold the memory threshold in bytes. When the uploaded content is larger than the threshold, a temp file is used for computing the MD5.
//
func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
return func(client *Client) {
client.Config.MD5Threshold = threshold
}
}
// EnableCRC enables the CRC checksum. Default is true.
//
// isEnableCRC true: enable CRC checksum; false: disable the CRC checksum.
//
func EnableCRC(isEnableCRC bool) ClientOption {
return func(client *Client) {
client.Config.IsEnableCRC = isEnableCRC
}
}
// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
//
// userAgent the user agent string.
//
func UserAgent(userAgent string) ClientOption {
return func(client *Client) {
client.Config.UserAgent = userAgent
}
}
// Proxy sets the proxy (optional). The default is not using proxy.
//
// proxyHost the proxy host in the format "host:port". For example, proxy.com:80.
//
func Proxy(proxyHost string) ClientOption {
return func(client *Client) {
client.Config.IsUseProxy = true
client.Config.ProxyHost = proxyHost
client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
}
}
// AuthProxy sets the proxy information with user name and password.
//
// proxyHost the proxy host in the format "host:port". For example, proxy.com:80.
// proxyUser the proxy user name.
// proxyPassword the proxy password.
//
func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
return func(client *Client) {
client.Config.IsUseProxy = true
client.Config.ProxyHost = proxyHost
client.Config.IsAuthProxy = true
client.Config.ProxyUser = proxyUser
client.Config.ProxyPassword = proxyPassword
client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
}
}
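// The ClientOption values above follow the functional-options pattern: each
// returns a closure that mutates the Client during construction. A combined
// usage sketch (illustrative; the endpoint, credential, and proxy values are
// hypothetical):
//
//	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret",
//		oss.Timeout(10, 120),          // 10s connect, 120s read/write
//		oss.SecurityToken("stsToken"), // temporary STS credentials
//		oss.AuthProxy("proxy.example.com:8080", "user", "password"))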
// Private
func (client Client) do(method, bucketName string, params map[string]interface{},
headers map[string]string, data io.Reader) (*Response, error) {
return client.Conn.Do(method, bucketName, "", params,
headers, data, 0, nil)
}

69
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
package oss
import (
"time"
)
// HTTPTimeout defines HTTP timeout.
type HTTPTimeout struct {
ConnectTimeout time.Duration
ReadWriteTimeout time.Duration
HeaderTimeout time.Duration
LongTimeout time.Duration
IdleConnTimeout time.Duration
}
// Config defines oss configuration
type Config struct {
Endpoint string // OSS endpoint
AccessKeyID string // AccessId
AccessKeySecret string // AccessKey
RetryTimes uint // Retry count. By default it's 5.
UserAgent string // SDK name/version/system information
IsDebug bool // Enable debug mode. Default is false.
Timeout uint // Timeout in seconds. By default it's 60.
SecurityToken string // STS Token
IsCname bool // If cname is in the endpoint.
HTTPTimeout HTTPTimeout // HTTP timeout
IsUseProxy bool // Flag of using proxy.
ProxyHost string // Proxy host address in "host:port" format.
IsAuthProxy bool // Flag of whether the proxy requires authentication.
ProxyUser string // Proxy user
ProxyPassword string // Proxy password
IsEnableMD5 bool // Flag of enabling MD5 for upload.
MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in bytes. When the data is larger than that, a temp file is used.
IsEnableCRC bool // Flag of enabling CRC for upload.
}
// getDefaultOssConfig gets the default configuration.
func getDefaultOssConfig() *Config {
config := Config{}
config.Endpoint = ""
config.AccessKeyID = ""
config.AccessKeySecret = ""
config.RetryTimes = 5
config.IsDebug = false
config.UserAgent = userAgent
config.Timeout = 60 // Seconds
config.SecurityToken = ""
config.IsCname = false
config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
config.IsUseProxy = false
config.ProxyHost = ""
config.IsAuthProxy = false
config.ProxyUser = ""
config.ProxyPassword = ""
config.MD5Threshold = 16 * 1024 * 1024 // 16MB
config.IsEnableMD5 = false
config.IsEnableCRC = true
return &config
}

599
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go generated vendored Normal file
View File

@ -0,0 +1,599 @@
package oss
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"hash"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"time"
)
// Conn defines OSS Conn
type Conn struct {
config *Config
url *urlMaker
client *http.Client
}
var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore"}
// init initializes Conn
func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
// New transport
transport := newTransport(conn, config)
// Proxy
if config.IsUseProxy { // check the passed-in config; conn.config is not assigned yet
proxyURL, err := url.Parse(config.ProxyHost)
if err != nil {
return err
}
transport.Proxy = http.ProxyURL(proxyURL)
}
conn.config = config
conn.url = urlMaker
conn.client = &http.Client{Transport: transport}
return nil
}
// Do sends request and returns the response
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
urlParams := conn.getURLParams(params)
subResource := conn.getSubResource(params)
uri := conn.url.getURL(bucketName, objectName, urlParams)
resource := conn.url.getResource(bucketName, objectName, subResource)
return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
}
// DoURL sends the request with signed URL and returns the response result.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
// Get URI from signedURL
uri, err := url.ParseRequestURI(signedURL)
if err != nil {
return nil, err
}
m := strings.ToUpper(string(method))
req := &http.Request{
Method: m,
URL: uri,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: uri.Host,
}
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
defer func() {
fd.Close()
os.Remove(fd.Name())
}()
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
// Transfer started
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
publishProgress(listener, event)
resp, err := conn.client.Do(req)
if err != nil {
// Transfer failed
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return nil, err
}
// Transfer completed
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
func (conn Conn) getURLParams(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
for k := range params {
keys = append(keys, k)
}
sort.Strings(keys)
// Serialize
var buf bytes.Buffer
for _, k := range keys {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(url.QueryEscape(k))
if params[k] != nil {
buf.WriteString("=" + url.QueryEscape(params[k].(string)))
}
}
return buf.String()
}
func (conn Conn) getSubResource(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
for k := range params {
if conn.isParamSign(k) {
keys = append(keys, k)
}
}
sort.Strings(keys)
// Serialize
var buf bytes.Buffer
for _, k := range keys {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(k)
if params[k] != nil {
buf.WriteString("=" + params[k].(string))
}
}
return buf.String()
}
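// Worked example (illustrative): for params {"logging": nil, "foo": "bar",
// "uploadId": "abc"}, getSubResource returns "logging&uploadId=abc". The key
// "foo" is dropped because it is not in signKeyList, the remaining keys are
// sorted, and values are appended unescaped. The result becomes part of the
// canonicalized resource that gets signed.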
func (conn Conn) isParamSign(paramKey string) bool {
for _, k := range signKeyList {
if paramKey == k {
return true
}
}
return false
}
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
method = strings.ToUpper(method)
req := &http.Request{
Method: method,
URL: uri,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: uri.Host,
}
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
defer func() {
fd.Close()
os.Remove(fd.Name())
}()
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
date := time.Now().UTC().Format(http.TimeFormat)
req.Header.Set(HTTPHeaderDate, date)
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if conn.config.SecurityToken != "" {
req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
}
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
conn.signHeader(req, canonicalizedResource)
// Transfer started
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
publishProgress(listener, event)
resp, err := conn.client.Do(req)
if err != nil {
// Transfer failed
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return nil, err
}
// Transfer completed
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
if conn.config.SecurityToken != "" {
params[HTTPParamSecurityToken] = conn.config.SecurityToken
}
subResource := conn.getSubResource(params)
canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)
m := strings.ToUpper(string(method))
req := &http.Request{
Method: m,
Header: make(http.Header),
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
signedStr := conn.getSignedStr(req, canonicalizedResource)
params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
params[HTTPParamSignature] = signedStr
urlParams := conn.getURLParams(params)
return conn.url.getSignURL(bucketName, objectName, urlParams)
}
// handleBody handles request body
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
var file *os.File
var crc hash.Hash64
reader := body
// Length
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
case *os.File:
req.ContentLength = tryGetFileSize(v)
case *io.LimitedReader:
req.ContentLength = int64(v.N)
}
req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
// MD5
if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
md5 := ""
reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
req.Header.Set(HTTPHeaderContentMD5, md5)
}
// CRC
if reader != nil && conn.config.IsEnableCRC {
crc = NewCRC(crcTable(), initCRC)
reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
}
// HTTP body
rc, ok := reader.(io.ReadCloser)
if !ok && reader != nil {
rc = ioutil.NopCloser(reader)
}
req.Body = rc
return file, crc
}
func tryGetFileSize(f *os.File) int64 {
fInfo, _ := f.Stat()
return fInfo.Size()
}
// handleResponse handles response
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
var cliCRC uint64
var srvCRC uint64
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
// 4xx and 5xx indicate that an error occurred during the operation
var respBody []byte
respBody, err := readResponseBody(resp)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// No error object in the response body
err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
} else {
// Response contains storage service error object, unmarshal
srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
resp.Header.Get(HTTPHeaderOssRequestID))
if errIn != nil { // error unmarshaling the error response
err = errIn
} else {
err = srvErr
}
}
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
}, err
} else if statusCode >= 300 && statusCode <= 307 {
// OSS returns 3xx status codes, but such responses carry no body
err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: resp.Body,
}, err
}
if conn.config.IsEnableCRC && crc != nil {
cliCRC = crc.Sum64()
}
srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
// 2xx, successful
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: resp.Body,
ClientCRC: cliCRC,
ServerCRC: srvCRC,
}, nil
}
func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
if contentLen == 0 || contentLen > md5Threshold {
// Huge body, use temporary file
tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
if tempFile != nil {
io.Copy(tempFile, body)
tempFile.Seek(0, os.SEEK_SET)
md5 := md5.New()
io.Copy(md5, tempFile)
sum := md5.Sum(nil)
b64 = base64.StdEncoding.EncodeToString(sum[:])
tempFile.Seek(0, os.SEEK_SET)
reader = tempFile
}
} else {
// Small body, use memory
buf, _ := ioutil.ReadAll(body)
sum := md5.Sum(buf)
b64 = base64.StdEncoding.EncodeToString(sum[:])
reader = bytes.NewReader(buf)
}
return
}
func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)
if err == io.EOF {
err = nil
}
return out, err
}
func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
var storageErr ServiceError
if err := xml.Unmarshal(body, &storageErr); err != nil {
return storageErr, err
}
storageErr.StatusCode = statusCode
storageErr.RequestID = requestID
storageErr.RawMessage = string(body)
return storageErr, nil
}
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
// timeoutConn handles HTTP timeout
type timeoutConn struct {
conn net.Conn
timeout time.Duration
longTimeout time.Duration
}
func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
conn.SetReadDeadline(time.Now().Add(longTimeout))
return &timeoutConn{
conn: conn,
timeout: timeout,
longTimeout: longTimeout,
}
}
func (c *timeoutConn) Read(b []byte) (n int, err error) {
c.SetReadDeadline(time.Now().Add(c.timeout))
n, err = c.conn.Read(b)
c.SetReadDeadline(time.Now().Add(c.longTimeout))
return n, err
}
func (c *timeoutConn) Write(b []byte) (n int, err error) {
c.SetWriteDeadline(time.Now().Add(c.timeout))
n, err = c.conn.Write(b)
c.SetReadDeadline(time.Now().Add(c.longTimeout))
return n, err
}
func (c *timeoutConn) Close() error {
return c.conn.Close()
}
func (c *timeoutConn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
func (c *timeoutConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
func (c *timeoutConn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
func (c *timeoutConn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// urlMaker builds URLs and canonicalized resources
const (
urlTypeCname = 1
urlTypeIP = 2
urlTypeAliyun = 3
)
type urlMaker struct {
Scheme string // HTTP or HTTPS
NetLoc string // Host or IP
Type int // 1 CNAME, 2 IP, 3 ALIYUN
IsProxy bool // Proxy
}
// Init parses endpoint
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
if strings.HasPrefix(endpoint, "http://") {
um.Scheme = "http"
um.NetLoc = endpoint[len("http://"):]
} else if strings.HasPrefix(endpoint, "https://") {
um.Scheme = "https"
um.NetLoc = endpoint[len("https://"):]
} else {
um.Scheme = "http"
um.NetLoc = endpoint
}
host, _, err := net.SplitHostPort(um.NetLoc)
if err != nil {
host = um.NetLoc
}
ip := net.ParseIP(host)
if ip != nil {
um.Type = urlTypeIP
} else if isCname {
um.Type = urlTypeCname
} else {
um.Type = urlTypeAliyun
}
um.IsProxy = isProxy
}
// getURL gets URL
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
host, path := um.buildURL(bucket, object)
addr := ""
if params == "" {
addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
} else {
addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}
uri, _ := url.ParseRequestURI(addr)
return uri
}
// getSignURL gets the signed URL
func (um urlMaker) getSignURL(bucket, object, params string) string {
host, path := um.buildURL(bucket, object)
return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}
// buildURL builds URL
func (um urlMaker) buildURL(bucket, object string) (string, string) {
var host = ""
var path = ""
object = url.QueryEscape(object)
object = strings.Replace(object, "+", "%20", -1)
if um.Type == urlTypeCname {
host = um.NetLoc
path = "/" + object
} else if um.Type == urlTypeIP {
if bucket == "" {
host = um.NetLoc
path = "/"
} else {
host = um.NetLoc
path = fmt.Sprintf("/%s/%s", bucket, object)
}
} else {
if bucket == "" {
host = um.NetLoc
path = "/"
} else {
host = bucket + "." + um.NetLoc
path = "/" + object
}
}
return host, path
}
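// Worked example (illustrative; hosts are hypothetical): for bucket
// "mybucket" and object "a/b.txt" (query-escaped to "a%2Fb.txt"), the three
// URL types yield:
//
//	urlTypeAliyun: host "mybucket.oss-cn-hangzhou.aliyuncs.com", path "/a%2Fb.txt"
//	urlTypeIP:     host "10.0.0.1", path "/mybucket/a%2Fb.txt"
//	urlTypeCname:  host "img.example.com", path "/a%2Fb.txt"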
// getResource gets canonicalized resource
func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
if subResource != "" {
subResource = "?" + subResource
}
if bucketName == "" {
return fmt.Sprintf("/%s%s", bucketName, subResource)
}
return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
}

132
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go generated vendored Normal file
View File

@ -0,0 +1,132 @@
package oss
import "os"
// ACLType bucket/object ACL
type ACLType string
const (
// ACLPrivate definition : private read and write
ACLPrivate ACLType = "private"
// ACLPublicRead definition : public read and private write
ACLPublicRead ACLType = "public-read"
// ACLPublicReadWrite definition : public read and public write
ACLPublicReadWrite ACLType = "public-read-write"
// ACLDefault Object. It's only applicable for object.
ACLDefault ACLType = "default"
)
// MetadataDirectiveType specifies whether to use the source object's metadata when copying an object.
type MetadataDirectiveType string
const (
// MetaCopy the target object's metadata is copied from the source one
MetaCopy MetadataDirectiveType = "COPY"
// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
MetaReplace MetadataDirectiveType = "REPLACE"
)
// StorageClassType bucket storage type
type StorageClassType string
const (
// StorageStandard standard
StorageStandard StorageClassType = "Standard"
// StorageIA infrequent access
StorageIA StorageClassType = "IA"
// StorageArchive archive
StorageArchive StorageClassType = "Archive"
)
// HTTPMethod HTTP request method
type HTTPMethod string
const (
// HTTPGet HTTP GET
HTTPGet HTTPMethod = "GET"
// HTTPPut HTTP PUT
HTTPPut HTTPMethod = "PUT"
// HTTPHead HTTP HEAD
HTTPHead HTTPMethod = "HEAD"
// HTTPPost HTTP POST
HTTPPost HTTPMethod = "POST"
// HTTPDelete HTTP DELETE
HTTPDelete HTTPMethod = "DELETE"
)
// HTTP headers
const (
HTTPHeaderAcceptEncoding string = "Accept-Encoding"
HTTPHeaderAuthorization = "Authorization"
HTTPHeaderCacheControl = "Cache-Control"
HTTPHeaderContentDisposition = "Content-Disposition"
HTTPHeaderContentEncoding = "Content-Encoding"
HTTPHeaderContentLength = "Content-Length"
HTTPHeaderContentMD5 = "Content-MD5"
HTTPHeaderContentType = "Content-Type"
HTTPHeaderContentLanguage = "Content-Language"
HTTPHeaderDate = "Date"
HTTPHeaderEtag = "ETag"
HTTPHeaderExpires = "Expires"
HTTPHeaderHost = "Host"
HTTPHeaderLastModified = "Last-Modified"
HTTPHeaderRange = "Range"
HTTPHeaderLocation = "Location"
HTTPHeaderOrigin = "Origin"
HTTPHeaderServer = "Server"
HTTPHeaderUserAgent = "User-Agent"
HTTPHeaderIfModifiedSince = "If-Modified-Since"
HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since"
HTTPHeaderIfMatch = "If-Match"
HTTPHeaderIfNoneMatch = "If-None-Match"
HTTPHeaderOssACL = "X-Oss-Acl"
HTTPHeaderOssMetaPrefix = "X-Oss-Meta-"
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl"
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match"
HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since"
HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive"
HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position"
HTTPHeaderOssRequestID = "X-Oss-Request-Id"
HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma"
HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
)
// HTTP Param
const (
HTTPParamExpires = "Expires"
HTTPParamAccessKeyID = "OSSAccessKeyId"
HTTPParamSignature = "Signature"
HTTPParamSecurityToken = "security-token"
)
// Other constants
const (
MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
MinPartSize = 100 * 1024 // Min part size, 100KB
FilePermMode = os.FileMode(0664) // Default file permission
TempFilePrefix = "oss-go-temp-" // Temp file prefix
TempFileSuffix = ".temp" // Temp file suffix
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
Version = "1.9.0" // Go SDK version
)

123
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
package oss
import (
"hash"
"hash/crc64"
)
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint64
tab *crc64.Table
}
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
// using the polynomial represented by the Table.
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
// Size returns the number of bytes sum will return.
func (d *digest) Size() int { return crc64.Size }
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }
// Reset resets the hash to its initial state.
func (d *digest) Reset() { d.crc = 0 }
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (d *digest) Write(p []byte) (n int, err error) {
d.crc = crc64.Update(d.crc, d.tab, p)
return len(p), nil
}
// Sum64 returns CRC64 value.
func (d *digest) Sum64() uint64 { return d.crc }
// Sum returns hash value.
func (d *digest) Sum(in []byte) []byte {
s := d.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// gf2Dim dimension of GF(2) vectors (length of CRC)
const gf2Dim int = 64
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
var sum uint64
for i := 0; vec != 0; i++ {
if vec&1 != 0 {
sum ^= mat[i]
}
vec >>= 1
}
return sum
}
func gf2MatrixSquare(square []uint64, mat []uint64) {
for n := 0; n < gf2Dim; n++ {
square[n] = gf2MatrixTimes(mat, mat[n])
}
}
// CRC64Combine combines CRC64
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
// Degenerate case
if len2 == 0 {
return crc1
}
// Put operator for one zero bit in odd
odd[0] = crc64.ECMA // CRC64 polynomial
var row uint64 = 1
for n := 1; n < gf2Dim; n++ {
odd[n] = row
row <<= 1
}
// Put operator for two zero bits in even
gf2MatrixSquare(even[:], odd[:])
// Put operator for four zero bits in odd
gf2MatrixSquare(odd[:], even[:])
// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
for {
// Apply zeros operator for this bit of len2
gf2MatrixSquare(even[:], odd[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(even[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
// Another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd[:], even[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(odd[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
}
// Return combined CRC
crc1 ^= crc2
return crc1
}
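// Usage sketch (illustrative): given the CRC64s of two consecutive byte
// ranges, CRC64Combine computes the CRC64 of their concatenation without
// re-reading the data; this is how per-part checksums are merged after a
// multipart download.
//
//	tab := crc64.MakeTable(crc64.ECMA)
//	crcA := crc64.Checksum([]byte("hello "), tab)
//	crcB := crc64.Checksum([]byte("world"), tab)
//	combined := CRC64Combine(crcA, crcB, uint64(len("world")))
//	// combined should equal crc64.Checksum([]byte("hello world"), tab)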

View File

@ -0,0 +1,550 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"os"
"strconv"
)
// DownloadFile downloads files with multipart download.
//
// objectKey the object key.
// filePath the local file path that the object is downloaded to.
// partSize the part size in bytes.
// options object's constraints, check out GetObject for the reference.
//
// error it's nil when the call succeeds, otherwise it's an error object.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
if partSize < 1 {
return errors.New("oss: part size smaller than 1")
}
cpConf, err := getCpConfig(options, filePath)
if err != nil {
return err
}
uRange, err := getRangeConfig(options)
if err != nil {
return err
}
routines := getRoutines(options)
if cpConf.IsEnable {
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines, uRange)
}
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
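// Usage sketch (illustrative; the key, paths, and option values are
// hypothetical; Routines and Checkpoint are Option helpers defined elsewhere
// in this SDK): download "big.bin" in 1MB parts over 3 worker routines,
// resuming from a checkpoint file if the transfer was interrupted.
//
//	err := bucket.DownloadFile("big.bin", "/tmp/big.bin", 1024*1024,
//		oss.Routines(3),
//		oss.Checkpoint(true, "/tmp/big.bin.cp"))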
// getRangeConfig gets the download range from the options.
func getRangeConfig(options []Option) (*unpackedRange, error) {
rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
if err != nil || rangeOpt == nil {
return nil, err
}
return parseRange(rangeOpt.(string))
}
// ----- concurrent download without checkpoint -----
// downloadWorkerArg is download worker's parameters
type downloadWorkerArg struct {
bucket *Bucket
key string
filePath string
options []Option
hook downloadPartHook
enableCRC bool
}
// downloadPartHook is the hook for testing
type downloadPartHook func(part downloadPart) error
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
func defaultDownloadPartHook(part downloadPart) error {
return nil
}
// defaultDownloadProgressListener defines the default ProgressListener; it shields the ProgressListener passed in GetObject's options.
type defaultDownloadProgressListener struct {
}
// ProgressChanged no-ops
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
// downloadWorker
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
for part := range jobs {
if err := arg.hook(part); err != nil {
failed <- err
break
}
// Resolve options
r := Range(part.Start, part.End)
p := Progress(&defaultDownloadProgressListener{})
opts := make([]Option, 0, len(arg.options)+2)
// Append in order; the order cannot be reversed!
opts = append(opts, arg.options...)
opts = append(opts, r, p)
rd, err := arg.bucket.GetObject(arg.key, opts...)
if err != nil {
failed <- err
break
}
defer rd.Close()
var crcCalc hash.Hash64
if arg.enableCRC {
crcCalc = crc64.New(crcTable())
contentLen := part.End - part.Start + 1
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
}
select {
case <-die:
return
default:
}
fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
if err != nil {
failed <- err
break
}
_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
if err != nil {
fd.Close()
failed <- err
break
}
_, err = io.Copy(fd, rd)
if err != nil {
fd.Close()
failed <- err
break
}
if arg.enableCRC {
part.CRC64 = crcCalc.Sum64()
}
fd.Close()
results <- part
}
}
// downloadScheduler
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
for _, part := range parts {
jobs <- part
}
close(jobs)
}
// downloadPart defines download part
type downloadPart struct {
Index int // Part number, starting from 0
Start int64 // Start index
End int64 // End index
Offset int64 // Offset
CRC64 uint64 // CRC check value of part
}
// getDownloadParts gets download parts
func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *unpackedRange) ([]downloadPart, bool, uint64, error) {
meta, err := bucket.GetObjectDetailedMeta(objectKey)
if err != nil {
return nil, false, 0, err
}
parts := []downloadPart{}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return nil, false, 0, err
}
enableCRC := false
crcVal := (uint64)(0)
if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
enableCRC = true
crcVal, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
}
}
part := downloadPart{}
i := 0
start, end := adjustRange(uRange, objectSize)
for offset := start; offset < end; offset += partSize {
part.Index = i
part.Start = offset
part.End = GetPartEnd(offset, end, partSize)
part.Offset = start
part.CRC64 = 0
parts = append(parts, part)
i++
}
return parts, enableCRC, crcVal, nil
}
// getObjectBytes gets object bytes length
func getObjectBytes(parts []downloadPart) int64 {
var ob int64
for _, part := range parts {
ob += (part.End - part.Start + 1)
}
return ob
}
// combineCRCInParts calculates the total CRC of consecutive parts
func combineCRCInParts(dps []downloadPart) uint64 {
if len(dps) == 0 {
return 0
}
crc := dps[0].CRC64
for i := 1; i < len(dps); i++ {
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
}
return crc
}
// downloadFile downloads file concurrently without checkpoint.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := getProgressListener(options)
// If the file does not exist, create one. If it exists, the download will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Get the parts of the file
parts, enableCRC, expectedCRC, err := getDownloadParts(&bucket, objectKey, partSize, uRange)
if err != nil {
return err
}
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
var completedBytes int64
totalBytes := getObjectBytes(parts)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
publishProgress(listener, event)
// Start the download workers
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Download parts concurrently
go downloadScheduler(jobs, parts)
// Wait for the parts download to finish
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
completedBytes += (part.End - part.Start + 1)
parts[part.Index].CRC64 = part.CRC64
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
if enableCRC {
actualCRC := combineCRCInParts(parts)
err = checkDownloadCRC(actualCRC, expectedCRC)
if err != nil {
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// ----- Concurrent download with checkpoint -----
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
type downloadCheckpoint struct {
Magic string // Magic
MD5 string // Checkpoint content MD5
FilePath string // Local file
Object string // Key
ObjStat objectStat // Object status
Parts []downloadPart // All download parts
PartStat []bool // Parts' download status
Start int64 // Start point of the file
End int64 // End point of the file
enableCRC bool // Whether has CRC check
CRC uint64 // CRC check value
}
type objectStat struct {
Size int64 // Object size
LastModified string // Last modified time
Etag string // Etag
}
// isValid checks whether the checkpoint data is valid. It returns true when the data is intact and the object has not been updated since the checkpoint was written.
func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *unpackedRange) (bool, error) {
// Compare the CP's Magic and the MD5
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
return false, nil
}
// Ensure the object has not been updated.
meta, err := bucket.GetObjectDetailedMeta(objectKey)
if err != nil {
return false, err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return false, err
}
// Compare the object size, last modified time and etag
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
return false, nil
}
// Check the download range
if uRange != nil {
start, end := adjustRange(uRange, objectSize)
if start != cp.Start || end != cp.End {
return false, nil
}
}
return true, nil
}
// load checkpoint from local file
func (cp *downloadCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
err = json.Unmarshal(contents, cp)
return err
}
// dump function dumps the checkpoint to the specified file
func (cp *downloadCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
return err
}
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialize
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts gets unfinished parts
func (cp downloadCheckpoint) todoParts() []downloadPart {
dps := []downloadPart{}
for i, ps := range cp.PartStat {
if !ps {
dps = append(dps, cp.Parts[i])
}
}
return dps
}
// getCompletedBytes gets completed size
func (cp downloadCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for i, part := range cp.Parts {
if cp.PartStat[i] {
completedBytes += (part.End - part.Start + 1)
}
}
return completedBytes
}
// prepare initiates download tasks
func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
// CP
cp.Magic = downloadCpMagic
cp.FilePath = filePath
cp.Object = objectKey
// Object
meta, err := bucket.GetObjectDetailedMeta(objectKey)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
}
cp.ObjStat.Size = objectSize
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
// Parts
cp.Parts, cp.enableCRC, cp.CRC, err = getDownloadParts(bucket, objectKey, partSize, uRange)
if err != nil {
return err
}
cp.PartStat = make([]bool, len(cp.Parts))
for i := range cp.PartStat {
cp.PartStat[i] = false
}
return nil
}
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
os.Remove(cpFilePath)
return os.Rename(downFilepath, cp.FilePath)
}
// downloadFileWithCp downloads files with checkpoint.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := getProgressListener(options)
// Load checkpoint data.
dcp := downloadCheckpoint{}
err := dcp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// If loading failed or the data is invalid, re-initialize the download.
valid, err := dcp.isValid(&bucket, objectKey, uRange)
if err != nil || !valid {
if err = dcp.prepare(&bucket, objectKey, filePath, partSize, uRange); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Create the file if it does not exist. Otherwise the parts download will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Unfinished parts
parts := dcp.todoParts()
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
completedBytes := dcp.getCompletedBytes()
event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
publishProgress(listener, event)
// Start the download worker routines
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Download parts concurrently
go downloadScheduler(jobs, parts)
// Wait for the parts download to finish
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
dcp.PartStat[part.Index] = true
dcp.Parts[part.Index].CRC64 = part.CRC64
dcp.dump(cpFilePath)
completedBytes += (part.End - part.Start + 1)
event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
publishProgress(listener, event)
if dcp.enableCRC {
actualCRC := combineCRCInParts(dcp.Parts)
err = checkDownloadCRC(actualCRC, dcp.CRC)
if err != nil {
return err
}
}
return dcp.complete(cpFilePath, tempFilePath)
}

View File

@ -0,0 +1,89 @@
package oss
import (
"encoding/xml"
"fmt"
"net/http"
"strings"
)
// ServiceError contains fields of the error response from the OSS Service REST API.
type ServiceError struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"` // The error code returned from OSS to the caller
Message string `xml:"Message"` // The detail error message from OSS
RequestID string `xml:"RequestId"` // The UUID used to uniquely identify the request
HostID string `xml:"HostId"` // The OSS server cluster's Id
RawMessage string // The raw messages from OSS
StatusCode int // HTTP status code
}
// Error implements interface error
func (e ServiceError) Error() string {
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
e.StatusCode, e.Code, e.Message, e.RequestID)
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int // The expected HTTP status codes returned from OSS
got int // The actual HTTP status code from OSS
}
// Error implements interface error
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
got := s(e.got)
expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
got, strings.Join(expected, " or "))
}
// Got is the actual status code returned by oss.
func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(respCode int, allowed []int) error {
for _, v := range allowed {
if respCode == v {
return nil
}
}
return UnexpectedStatusCodeError{allowed, respCode}
}
// CRCCheckError is returned when the CRC check between client and server is inconsistent
type CRCCheckError struct {
clientCRC uint64 // Calculated CRC64 in client
serverCRC uint64 // Calculated CRC64 in server
operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
requestID string // The request id of this operation
}
// Error implements interface error
func (e CRCCheckError) Error() string {
return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
e.operation, e.clientCRC, e.serverCRC, e.requestID)
}
func checkDownloadCRC(clientCRC, serverCRC uint64) error {
if clientCRC == serverCRC {
return nil
}
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
}
func checkCRC(resp *Response, operation string) error {
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
return nil
}
return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
}

245
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go generated vendored Normal file
View File

@ -0,0 +1,245 @@
package oss
import (
"mime"
"path"
"strings"
)
var extToMimeType = map[string]string{
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
".apk": "application/vnd.android.package-archive",
".hqx": "application/mac-binhex40",
".cpt": "application/mac-compactpro",
".doc": "application/msword",
".ogg": "application/ogg",
".pdf": "application/pdf",
".rtf": "text/rtf",
".mif": "application/vnd.mif",
".xls": "application/vnd.ms-excel",
".ppt": "application/vnd.ms-powerpoint",
".odc": "application/vnd.oasis.opendocument.chart",
".odb": "application/vnd.oasis.opendocument.database",
".odf": "application/vnd.oasis.opendocument.formula",
".odg": "application/vnd.oasis.opendocument.graphics",
".otg": "application/vnd.oasis.opendocument.graphics-template",
".odi": "application/vnd.oasis.opendocument.image",
".odp": "application/vnd.oasis.opendocument.presentation",
".otp": "application/vnd.oasis.opendocument.presentation-template",
".ods": "application/vnd.oasis.opendocument.spreadsheet",
".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
".odt": "application/vnd.oasis.opendocument.text",
".odm": "application/vnd.oasis.opendocument.text-master",
".ott": "application/vnd.oasis.opendocument.text-template",
".oth": "application/vnd.oasis.opendocument.text-web",
".sxw": "application/vnd.sun.xml.writer",
".stw": "application/vnd.sun.xml.writer.template",
".sxc": "application/vnd.sun.xml.calc",
".stc": "application/vnd.sun.xml.calc.template",
".sxd": "application/vnd.sun.xml.draw",
".std": "application/vnd.sun.xml.draw.template",
".sxi": "application/vnd.sun.xml.impress",
".sti": "application/vnd.sun.xml.impress.template",
".sxg": "application/vnd.sun.xml.writer.global",
".sxm": "application/vnd.sun.xml.math",
".sis": "application/vnd.symbian.install",
".wbxml": "application/vnd.wap.wbxml",
".wmlc": "application/vnd.wap.wmlc",
".wmlsc": "application/vnd.wap.wmlscriptc",
".bcpio": "application/x-bcpio",
".torrent": "application/x-bittorrent",
".bz2": "application/x-bzip2",
".vcd": "application/x-cdlink",
".pgn": "application/x-chess-pgn",
".cpio": "application/x-cpio",
".csh": "application/x-csh",
".dvi": "application/x-dvi",
".spl": "application/x-futuresplash",
".gtar": "application/x-gtar",
".hdf": "application/x-hdf",
".jar": "application/x-java-archive",
".jnlp": "application/x-java-jnlp-file",
".js": "application/x-javascript",
".ksp": "application/x-kspread",
".chrt": "application/x-kchart",
".kil": "application/x-killustrator",
".latex": "application/x-latex",
".rpm": "application/x-rpm",
".sh": "application/x-sh",
".shar": "application/x-shar",
".swf": "application/x-shockwave-flash",
".sit": "application/x-stuffit",
".sv4cpio": "application/x-sv4cpio",
".sv4crc": "application/x-sv4crc",
".tar": "application/x-tar",
".tcl": "application/x-tcl",
".tex": "application/x-tex",
".man": "application/x-troff-man",
".me": "application/x-troff-me",
".ms": "application/x-troff-ms",
".ustar": "application/x-ustar",
".src": "application/x-wais-source",
".zip": "application/zip",
".m3u": "audio/x-mpegurl",
".ra": "audio/x-pn-realaudio",
".wav": "audio/x-wav",
".wma": "audio/x-ms-wma",
".wax": "audio/x-ms-wax",
".pdb": "chemical/x-pdb",
".xyz": "chemical/x-xyz",
".bmp": "image/bmp",
".gif": "image/gif",
".ief": "image/ief",
".png": "image/png",
".wbmp": "image/vnd.wap.wbmp",
".ras": "image/x-cmu-raster",
".pnm": "image/x-portable-anymap",
".pbm": "image/x-portable-bitmap",
".pgm": "image/x-portable-graymap",
".ppm": "image/x-portable-pixmap",
".rgb": "image/x-rgb",
".xbm": "image/x-xbitmap",
".xpm": "image/x-xpixmap",
".xwd": "image/x-xwindowdump",
".css": "text/css",
".rtx": "text/richtext",
".tsv": "text/tab-separated-values",
".jad": "text/vnd.sun.j2me.app-descriptor",
".wml": "text/vnd.wap.wml",
".wmls": "text/vnd.wap.wmlscript",
".etx": "text/x-setext",
".mxu": "video/vnd.mpegurl",
".flv": "video/x-flv",
".wm": "video/x-ms-wm",
".wmv": "video/x-ms-wmv",
".wmx": "video/x-ms-wmx",
".wvx": "video/x-ms-wvx",
".avi": "video/x-msvideo",
".movie": "video/x-sgi-movie",
".ice": "x-conference/x-cooltalk",
".3gp": "video/3gpp",
".ai": "application/postscript",
".aif": "audio/x-aiff",
".aifc": "audio/x-aiff",
".aiff": "audio/x-aiff",
".asc": "text/plain",
".atom": "application/atom+xml",
".au": "audio/basic",
".bin": "application/octet-stream",
".cdf": "application/x-netcdf",
".cgm": "image/cgm",
".class": "application/octet-stream",
".dcr": "application/x-director",
".dif": "video/x-dv",
".dir": "application/x-director",
".djv": "image/vnd.djvu",
".djvu": "image/vnd.djvu",
".dll": "application/octet-stream",
".dmg": "application/octet-stream",
".dms": "application/octet-stream",
".dtd": "application/xml-dtd",
".dv": "video/x-dv",
".dxr": "application/x-director",
".eps": "application/postscript",
".exe": "application/octet-stream",
".ez": "application/andrew-inset",
".gram": "application/srgs",
".grxml": "application/srgs+xml",
".gz": "application/x-gzip",
".htm": "text/html",
".html": "text/html",
".ico": "image/x-icon",
".ics": "text/calendar",
".ifb": "text/calendar",
".iges": "model/iges",
".igs": "model/iges",
".jp2": "image/jp2",
".jpe": "image/jpeg",
".jpeg": "image/jpeg",
".jpg": "image/jpeg",
".kar": "audio/midi",
".lha": "application/octet-stream",
".lzh": "application/octet-stream",
".m4a": "audio/mp4a-latm",
".m4p": "audio/mp4a-latm",
".m4u": "video/vnd.mpegurl",
".m4v": "video/x-m4v",
".mac": "image/x-macpaint",
".mathml": "application/mathml+xml",
".mesh": "model/mesh",
".mid": "audio/midi",
".midi": "audio/midi",
".mov": "video/quicktime",
".mp2": "audio/mpeg",
".mp3": "audio/mpeg",
".mp4": "video/mp4",
".mpe": "video/mpeg",
".mpeg": "video/mpeg",
".mpg": "video/mpeg",
".mpga": "audio/mpeg",
".msh": "model/mesh",
".nc": "application/x-netcdf",
".oda": "application/oda",
".ogv": "video/ogv",
".pct": "image/pict",
".pic": "image/pict",
".pict": "image/pict",
".pnt": "image/x-macpaint",
".pntg": "image/x-macpaint",
".ps": "application/postscript",
".qt": "video/quicktime",
".qti": "image/x-quicktime",
".qtif": "image/x-quicktime",
".ram": "audio/x-pn-realaudio",
".rdf": "application/rdf+xml",
".rm": "application/vnd.rn-realmedia",
".roff": "application/x-troff",
".sgm": "text/sgml",
".sgml": "text/sgml",
".silo": "model/mesh",
".skd": "application/x-koan",
".skm": "application/x-koan",
".skp": "application/x-koan",
".skt": "application/x-koan",
".smi": "application/smil",
".smil": "application/smil",
".snd": "audio/basic",
".so": "application/octet-stream",
".svg": "image/svg+xml",
".t": "application/x-troff",
".texi": "application/x-texinfo",
".texinfo": "application/x-texinfo",
".tif": "image/tiff",
".tiff": "image/tiff",
".tr": "application/x-troff",
".txt": "text/plain",
".vrml": "model/vrml",
".vxml": "application/voicexml+xml",
".webm": "video/webm",
".wrl": "model/vrml",
".xht": "application/xhtml+xml",
".xhtml": "application/xhtml+xml",
".xml": "application/xml",
".xsl": "application/xml",
".xslt": "application/xslt+xml",
".xul": "application/vnd.mozilla.xul+xml",
}
// TypeByExtension returns the MIME type associated with the file's extension,
// used for the HTTP header Content-Type. It falls back to the table above when the standard library has no mapping.
func TypeByExtension(filePath string) string {
typ := mime.TypeByExtension(path.Ext(filePath))
if typ == "" {
typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
}
return typ
}

View File

@ -0,0 +1,60 @@
package oss
import (
"hash"
"io"
"net/http"
)
// Response defines HTTP response from OSS
type Response struct {
StatusCode int
Headers http.Header
Body io.ReadCloser
ClientCRC uint64
ServerCRC uint64
}
// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
ObjectKey string
Reader io.Reader
}
// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
ObjectKey string
}
// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
Response *Response
ClientCRC hash.Hash64
ServerCRC uint64
}
// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
ObjectKey string
Reader io.Reader
Position int64
}
// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
NextPosition int64
CRC uint64
}
// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
InitResult *InitiateMultipartUploadResult
Reader io.Reader
PartSize int64
PartNumber int
}
// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
Part UploadPart
}

View File

@ -0,0 +1,460 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"os"
"path/filepath"
"strconv"
)
// CopyFile is a multipart copy of an object.
//
// srcBucketName source bucket name
// srcObjectKey source object name
// destObjectKey target object name in the destination bucket (the bucket this method is called on)
// partSize the part size in bytes.
// options the object's constraints. Check out function InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
destBucketName := bucket.BucketName
if partSize < MinPartSize || partSize > MaxPartSize {
return errors.New("oss: part size invalid range (1024KB, 5GB]")
}
cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
if err != nil {
return err
}
routines := getRoutines(options)
if cpConf.IsEnable {
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
partSize, options, cpConf.FilePath, routines)
}
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
partSize, options, routines)
}
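
A hedged usage sketch (endpoint, credentials and names below are placeholders). Note that CopyFile is called on the destination bucket, with the source bucket passed by name:

package main

import (
    "log"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    destBucket, err := client.Bucket("backup-bucket")
    if err != nil {
        log.Fatal(err)
    }
    // Copy live-bucket/data/big.bin into backup-bucket in 5MB parts,
    // three parts in flight at a time.
    err = destBucket.CopyFile("live-bucket", "data/big.bin", "data/big.bin", 5<<20, oss.Routines(3))
    if err != nil {
        log.Fatal(err)
    }
}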
// ----- Concurrent copy without checkpoint -----
// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
bucket *Bucket
imur InitiateMultipartUploadResult
srcBucketName string
srcObjectKey string
options []Option
hook copyPartHook
}
// copyPartHook is the hook for testing purposes
type copyPartHook func(part copyPart) error
var copyPartHooker copyPartHook = defaultCopyPartHook
func defaultCopyPartHook(part copyPart) error {
return nil
}
// copyWorker is the worker goroutine that copies the parts it receives
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(chunk); err != nil {
failed <- err
break
}
chunkSize := chunk.End - chunk.Start + 1
part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
chunk.Start, chunkSize, chunk.Number, arg.options...)
if err != nil {
failed <- err
break
}
select {
case <-die:
return
default:
}
results <- part
}
}
// copyScheduler feeds the parts into the jobs channel and closes it when done
func copyScheduler(jobs chan copyPart, parts []copyPart) {
for _, part := range parts {
jobs <- part
}
close(jobs)
}
// copyPart structure
type copyPart struct {
Number int // Part number (from 1 to 10,000)
Start int64 // The start index in the source file.
End int64 // The end index in the source file
}
// getCopyParts calculates copy parts
func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
meta, err := bucket.GetObjectDetailedMeta(objectKey)
if err != nil {
return nil, err
}
parts := []copyPart{}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return nil, err
}
part := copyPart{}
i := 0
for offset := int64(0); offset < objectSize; offset += partSize {
part.Number = i + 1
part.Start = offset
part.End = GetPartEnd(offset, objectSize, partSize)
parts = append(parts, part)
i++
}
return parts, nil
}
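
To make the splitting arithmetic concrete, a standalone sketch that mirrors the loop above (getPartEnd restates the SDK's GetPartEnd helper for illustration):

package main

import "fmt"

// getPartEnd restates the SDK helper: a part ends at begin+per-1 or at the
// object's last byte, whichever comes first.
func getPartEnd(begin, total, per int64) int64 {
    if begin+per > total {
        return total - 1
    }
    return begin + per - 1
}

func main() {
    const objectSize, partSize int64 = 10 << 20, 4 << 20 // 10MB object, 4MB parts
    for offset, n := int64(0), 1; offset < objectSize; offset, n = offset+partSize, n+1 {
        fmt.Printf("part %d: bytes %d-%d\n", n, offset, getPartEnd(offset, objectSize, partSize))
    }
    // Output:
    // part 1: bytes 0-4194303
    // part 2: bytes 4194304-8388607
    // part 3: bytes 8388608-10485759
}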
// getSrcObjectBytes gets the source file size
func getSrcObjectBytes(parts []copyPart) int64 {
var ob int64
for _, part := range parts {
ob += (part.End - part.Start + 1)
}
return ob
}
// copyFile is a concurrent copy without checkpoint
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
partSize int64, options []Option, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
if err != nil {
return err
}
srcBucket, err := bucket.Client.Bucket(srcBucketName)
if err != nil {
return err
}
listener := getProgressListener(options)
// Get copy parts
parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
if err != nil {
return err
}
// Initialize the multipart upload
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
if err != nil {
return err
}
jobs := make(chan copyPart, len(parts))
results := make(chan UploadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
var completedBytes int64
totalBytes := getSrcObjectBytes(parts)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
publishProgress(listener, event)
// Start the copy workers
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
// Start the scheduler
go copyScheduler(jobs, parts)
// Wait for the parts to finish.
completed := 0
ups := make([]UploadPart, len(parts))
for completed < len(parts) {
select {
case part := <-results:
completed++
ups[part.PartNumber-1] = part
completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
descBucket.AbortMultipartUpload(imur)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
// Complete the multipart upload
_, err = descBucket.CompleteMultipartUpload(imur, ups)
if err != nil {
bucket.AbortMultipartUpload(imur)
return err
}
return nil
}
// ----- Concurrent copy with checkpoint -----
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
type copyCheckpoint struct {
Magic string // Magic
MD5 string // CP content MD5
SrcBucketName string // Source bucket
SrcObjectKey string // Source object
DestBucketName string // Target bucket
DestObjectKey string // Target object
CopyID string // Copy ID
ObjStat objectStat // Object stat
Parts []copyPart // Copy parts
CopyParts []UploadPart // The uploaded parts
PartStat []bool // The part status
}
// isValid checks that the checkpoint data is consistent and the source object has not been updated since the checkpoint was taken.
func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
// Compare CP's magic number and the MD5.
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != copyCpMagic || b64 != cp.MD5 {
return false, nil
}
// Make sure the object is not updated.
meta, err := bucket.GetObjectDetailedMeta(objectKey)
if err != nil {
return false, err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return false, err
}
// Compare the object size and last modified time and etag.
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
return false, nil
}
return true, nil
}
// load loads from the checkpoint file
func (cp *copyCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
err = json.Unmarshal(contents, cp)
return err
}
// update updates the parts status
func (cp *copyCheckpoint) update(part UploadPart) {
cp.CopyParts[part.PartNumber-1] = part
cp.PartStat[part.PartNumber-1] = true
}
// dump dumps the CP to the file
func (cp *copyCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
return err
}
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialization
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns unfinished parts
func (cp copyCheckpoint) todoParts() []copyPart {
dps := []copyPart{}
for i, ps := range cp.PartStat {
if !ps {
dps = append(dps, cp.Parts[i])
}
}
return dps
}
// getCompletedBytes returns finished bytes count
func (cp copyCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for i, part := range cp.Parts {
if cp.PartStat[i] {
completedBytes += (part.End - part.Start + 1)
}
}
return completedBytes
}
// prepare initializes the multipart upload
func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
partSize int64, options []Option) error {
// CP
cp.Magic = copyCpMagic
cp.SrcBucketName = srcBucket.BucketName
cp.SrcObjectKey = srcObjectKey
cp.DestBucketName = destBucket.BucketName
cp.DestObjectKey = destObjectKey
// Object
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
}
cp.ObjStat.Size = objectSize
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
// Parts
cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
if err != nil {
return err
}
cp.PartStat = make([]bool, len(cp.Parts))
for i := range cp.PartStat {
cp.PartStat[i] = false
}
cp.CopyParts = make([]UploadPart, len(cp.Parts))
// Init copy
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
if err != nil {
return err
}
cp.CopyID = imur.UploadID
return nil
}
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
Key: cp.DestObjectKey, UploadID: cp.CopyID}
_, err := bucket.CompleteMultipartUpload(imur, parts)
if err != nil {
return err
}
os.Remove(cpFilePath)
return err
}
// copyFileWithCp is a concurrent copy with checkpoint
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
partSize int64, options []Option, cpFilePath string, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
if err != nil {
return err
}
srcBucket, err := bucket.Client.Bucket(srcBucketName)
if err != nil {
return err
}
listener := getProgressListener(options)
// Load CP data
ccp := copyCheckpoint{}
err = ccp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// If the load failed or the CP data is invalid, reinitialize
valid, err := ccp.isValid(srcBucket, srcObjectKey)
if err != nil || !valid {
if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Unfinished parts
parts := ccp.todoParts()
imur := InitiateMultipartUploadResult{
Bucket: destBucketName,
Key: destObjectKey,
UploadID: ccp.CopyID}
jobs := make(chan copyPart, len(parts))
results := make(chan UploadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
completedBytes := ccp.getCompletedBytes()
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
publishProgress(listener, event)
// Start the worker goroutines
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
// Start the scheduler
go copyScheduler(jobs, parts)
// Wait for the parts to complete.
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
ccp.update(part)
ccp.dump(cpFilePath)
completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
publishProgress(listener, event)
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
}
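
A hedged sketch of a resumable copy (placeholder names). Passing the Checkpoint option routes CopyFile through copyFileWithCp, so rerunning the same call after a crash resumes from the dumped part state instead of starting over:

package main

import (
    "log"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    bucket, err := client.Bucket("backup-bucket")
    if err != nil {
        log.Fatal(err)
    }
    // Part state is dumped to big.bin.cp after each finished part.
    err = bucket.CopyFile("live-bucket", "data/big.bin", "data/big.bin", 5<<20,
        oss.Routines(3), oss.Checkpoint(true, "big.bin.cp"))
    if err != nil {
        log.Fatal(err)
    }
}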

View File

@ -0,0 +1,278 @@
package oss
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"os"
"sort"
"strconv"
)
// InitiateMultipartUpload initializes multipart upload
//
// objectKey object name
// options the object constraints for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, Meta, check out the following link:
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
//
// InitiateMultipartUploadResult the return value of InitiateMultipartUpload, which is used in later calls such as UploadPartFromFile, UploadPartCopy.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
var imur InitiateMultipartUploadResult
opts := addContentType(options, objectKey)
params := map[string]interface{}{}
params["uploads"] = nil
resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
if err != nil {
return imur, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &imur)
return imur, err
}
// UploadPart uploads parts
//
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
// And thus with the same part number and upload Id, another part upload will overwrite the data.
// Except for the last part, the minimum part size is 100KB. There's no limit on the last part's size.
//
// imur the returned value of InitiateMultipartUpload.
// reader io.Reader the reader for the part's data.
// size the part size.
// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
//
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
request := &UploadPartRequest{
InitResult: &imur,
Reader: reader,
PartSize: partSize,
PartNumber: partNumber,
}
result, err := bucket.DoUploadPart(request, options)
return result.Part, err
}
// UploadPartFromFile uploads part from the file.
//
// imur the return value of a successful InitiateMultipartUpload.
// filePath the local file path to upload.
// startPosition the start position in the local file.
// partSize the part size.
// partNumber the part number (from 1 to 10,000)
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var part = UploadPart{}
fd, err := os.Open(filePath)
if err != nil {
return part, err
}
defer fd.Close()
fd.Seek(startPosition, os.SEEK_SET)
request := &UploadPartRequest{
InitResult: &imur,
Reader: fd,
PartSize: partSize,
PartNumber: partNumber,
}
result, err := bucket.DoUploadPart(request, options)
return result.Part, err
}
// DoUploadPart does the actual part upload.
//
// request part upload request
//
// UploadPartResult the result of uploading part.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
listener := getProgressListener(options)
opts := []Option{ContentLength(request.PartSize)}
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(request.PartNumber)
params["uploadId"] = request.InitResult.UploadID
resp, err := bucket.do("PUT", request.InitResult.Key, params, opts,
&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
if err != nil {
return &UploadPartResult{}, err
}
defer resp.Body.Close()
part := UploadPart{
ETag: resp.Headers.Get(HTTPHeaderEtag),
PartNumber: request.PartNumber,
}
if bucket.getConfig().IsEnableCRC {
err = checkCRC(resp, "DoUploadPart")
if err != nil {
return &UploadPartResult{part}, err
}
}
return &UploadPartResult{part}, nil
}
// UploadPartCopy uploads part copy
//
// imur the return value of InitiateMultipartUpload
// copySrc source Object name
// startPosition the part's start index in the source file
// partSize the part size
// partNumber the part number, ranging from 1 to 10,000. If it exceeds the range, OSS returns an InvalidArgument error.
// options the constraints of the source object for the copy. The copy happens only when these constraints are met; otherwise it returns an error.
// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var out UploadPartCopyResult
var part UploadPart
opts := []Option{CopySource(srcBucketName, srcObjectKey),
CopySourceRange(startPosition, partSize)}
opts = append(opts, options...)
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(partNumber)
params["uploadId"] = imur.UploadID
resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
if err != nil {
return part, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return part, err
}
part.ETag = out.ETag
part.PartNumber = partNumber
return part, nil
}
// CompleteMultipartUpload completes the multipart upload.
//
// imur the return value of InitiateMultipartUpload.
// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
parts []UploadPart) (CompleteMultipartUploadResult, error) {
var out CompleteMultipartUploadResult
sort.Sort(uploadParts(parts))
cxml := completeMultipartUploadXML{}
cxml.Part = parts
bs, err := xml.Marshal(cxml)
if err != nil {
return out, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("POST", imur.Key, params, nil, buffer, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
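
Chaining the three calls, a minimal sketch of a manual two-part upload (placeholder endpoint, bucket and keys; assumes the local file holds at least two full 5MB parts):

package main

import (
    "log"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    bucket, err := client.Bucket("my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    // 1. Initiate; the returned upload ID identifies the whole upload.
    imur, err := bucket.InitiateMultipartUpload("backups/vault.snap")
    if err != nil {
        log.Fatal(err)
    }
    // 2. Upload two consecutive 5MB slices as parts 1 and 2.
    const partSize int64 = 5 << 20
    var parts []oss.UploadPart
    for i := 0; i < 2; i++ {
        part, err := bucket.UploadPartFromFile(imur, "vault.snap", int64(i)*partSize, partSize, i+1)
        if err != nil {
            bucket.AbortMultipartUpload(imur) // clean up the half-finished upload
            log.Fatal(err)
        }
        parts = append(parts, part)
    }
    // 3. Stitch the parts together server side.
    if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
        log.Fatal(err)
    }
}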
// AbortMultipartUpload aborts the multipart upload.
//
// imur the return value of InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("DELETE", imur.Key, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// ListUploadedParts lists the uploaded parts.
//
// imur the return value of InitiateMultipartUpload.
//
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
var out ListUploadedPartsResult
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// ListMultipartUploads lists all ongoing multipart upload tasks
//
// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
//
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
var out ListMultipartUploadResult
options = append(options, EncodingType("url"))
params, err := getRawParams(options)
if err != nil {
return out, err
}
params["uploads"] = nil
resp, err := bucket.do("GET", "", params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return out, err
}
err = decodeListMultipartUploadResult(&out)
return out, err
}
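
A housekeeping sketch built on these calls (placeholder names): list the ongoing uploads under a prefix and abort any that have been sitting for more than a day, since unfinished parts keep occupying storage until aborted or completed.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    bucket, err := client.Bucket("my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    lmur, err := bucket.ListMultipartUploads(oss.Prefix("backups/"))
    if err != nil {
        log.Fatal(err)
    }
    for _, u := range lmur.Uploads {
        if time.Since(u.Initiated) < 24*time.Hour {
            continue
        }
        imur := oss.InitiateMultipartUploadResult{Bucket: lmur.Bucket, Key: u.Key, UploadID: u.UploadID}
        if err := bucket.AbortMultipartUpload(imur); err != nil {
            log.Printf("abort %s (%s): %v", u.Key, u.UploadID, err)
            continue
        }
        fmt.Println("aborted stale upload:", u.Key)
    }
}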

View File

@ -0,0 +1,386 @@
package oss
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
)
type optionType string
const (
optionParam optionType = "HTTPParameter" // URL parameter
optionHTTP optionType = "HTTPHeader" // HTTP header
optionArg optionType = "FuncArgument" // Function argument
)
const (
deleteObjectsQuiet = "delete-objects-quiet"
routineNum = "x-routine-num"
checkpointConfig = "x-cp-config"
initCRC64 = "init-crc64"
progressListener = "x-progress-listener"
storageClass = "storage-class"
)
type (
optionValue struct {
Value interface{}
Type optionType
}
// Option HTTP option
Option func(map[string]optionValue) error
)
// ACL is an option to set X-Oss-Acl header
func ACL(acl ACLType) Option {
return setHeader(HTTPHeaderOssACL, string(acl))
}
// ContentType is an option to set Content-Type header
func ContentType(value string) Option {
return setHeader(HTTPHeaderContentType, value)
}
// ContentLength is an option to set Content-Length header
func ContentLength(length int64) Option {
return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
}
// CacheControl is an option to set Cache-Control header
func CacheControl(value string) Option {
return setHeader(HTTPHeaderCacheControl, value)
}
// ContentDisposition is an option to set Content-Disposition header
func ContentDisposition(value string) Option {
return setHeader(HTTPHeaderContentDisposition, value)
}
// ContentEncoding is an option to set Content-Encoding header
func ContentEncoding(value string) Option {
return setHeader(HTTPHeaderContentEncoding, value)
}
// ContentMD5 is an option to set Content-MD5 header
func ContentMD5(value string) Option {
return setHeader(HTTPHeaderContentMD5, value)
}
// Expires is an option to set Expires header
func Expires(t time.Time) Option {
return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
}
// Meta is an option to set Meta header
func Meta(key, value string) Option {
return setHeader(HTTPHeaderOssMetaPrefix+key, value)
}
// Range is an option to set Range header, [start, end]
func Range(start, end int64) Option {
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}
// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
func NormalizedRange(nr string) Option {
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
}
// AcceptEncoding is an option to set Accept-Encoding header
func AcceptEncoding(value string) Option {
return setHeader(HTTPHeaderAcceptEncoding, value)
}
// IfModifiedSince is an option to set If-Modified-Since header
func IfModifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
}
// IfUnmodifiedSince is an option to set If-Unmodified-Since header
func IfUnmodifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
}
// IfMatch is an option to set If-Match header
func IfMatch(value string) Option {
return setHeader(HTTPHeaderIfMatch, value)
}
// IfNoneMatch is an option to set IfNoneMatch header
func IfNoneMatch(value string) Option {
return setHeader(HTTPHeaderIfNoneMatch, value)
}
// CopySource is an option to set X-Oss-Copy-Source header
func CopySource(sourceBucket, sourceObject string) Option {
return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}
// CopySourceRange is an option to set X-Oss-Copy-Source header
func CopySourceRange(startPosition, partSize int64) Option {
val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
strconv.FormatInt((startPosition+partSize-1), 10)
return setHeader(HTTPHeaderOssCopySourceRange, val)
}
// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
func CopySourceIfMatch(value string) Option {
return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
}
// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
func CopySourceIfNoneMatch(value string) Option {
return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
}
// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header
func CopySourceIfModifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
}
// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
func CopySourceIfUnmodifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
}
// MetadataDirective is an option to set X-Oss-Metadata-Directive header
func MetadataDirective(directive MetadataDirectiveType) Option {
return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
}
// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
func ServerSideEncryption(value string) Option {
return setHeader(HTTPHeaderOssServerSideEncryption, value)
}
// ObjectACL is an option to set X-Oss-Object-Acl header
func ObjectACL(acl ACLType) Option {
return setHeader(HTTPHeaderOssObjectACL, string(acl))
}
// symlinkTarget is an option to set X-Oss-Symlink-Target
func symlinkTarget(targetObjectKey string) Option {
return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
}
// Origin is an option to set Origin header
func Origin(value string) Option {
return setHeader(HTTPHeaderOrigin, value)
}
// Delimiter is an option to set the delimiter parameter
func Delimiter(value string) Option {
return addParam("delimiter", value)
}
// Marker is an option to set marker parameter
func Marker(value string) Option {
return addParam("marker", value)
}
// MaxKeys is an option to set maxkeys parameter
func MaxKeys(value int) Option {
return addParam("max-keys", strconv.Itoa(value))
}
// Prefix is an option to set prefix parameter
func Prefix(value string) Option {
return addParam("prefix", value)
}
// EncodingType is an option to set encoding-type parameter
func EncodingType(value string) Option {
return addParam("encoding-type", value)
}
// MaxUploads is an option to set max-uploads parameter
func MaxUploads(value int) Option {
return addParam("max-uploads", strconv.Itoa(value))
}
// KeyMarker is an option to set key-marker parameter
func KeyMarker(value string) Option {
return addParam("key-marker", value)
}
// UploadIDMarker is an option to set upload-id-marker parameter
func UploadIDMarker(value string) Option {
return addParam("upload-id-marker", value)
}
// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
return addArg(deleteObjectsQuiet, isQuiet)
}
// StorageClass bucket storage class
func StorageClass(value StorageClassType) Option {
return addArg(storageClass, value)
}
// Checkpoint configuration
type cpConfig struct {
IsEnable bool
FilePath string
}
// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
func Checkpoint(isEnable bool, filePath string) Option {
return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
}
// Routines DownloadFile/UploadFile routine count
func Routines(n int) Option {
return addArg(routineNum, n)
}
// InitCRC Init AppendObject CRC
func InitCRC(initCRC uint64) Option {
return addArg(initCRC64, initCRC)
}
// Progress set progress listener
func Progress(listener ProgressListener) Option {
return addArg(progressListener, listener)
}
// ResponseContentType is an option to set response-content-type param
func ResponseContentType(value string) Option {
return addParam("response-content-type", value)
}
// ResponseContentLanguage is an option to set response-content-language param
func ResponseContentLanguage(value string) Option {
return addParam("response-content-language", value)
}
// ResponseExpires is an option to set response-expires param
func ResponseExpires(value string) Option {
return addParam("response-expires", value)
}
// ResponseCacheControl is an option to set response-cache-control param
func ResponseCacheControl(value string) Option {
return addParam("response-cache-control", value)
}
// ResponseContentDisposition is an option to set response-content-disposition param
func ResponseContentDisposition(value string) Option {
return addParam("response-content-disposition", value)
}
// ResponseContentEncoding is an option to set response-content-encoding param
func ResponseContentEncoding(value string) Option {
return addParam("response-content-encoding", value)
}
// Process is an option to set X-Oss-Process param
func Process(value string) Option {
return addParam("X-Oss-Process", value)
}
func setHeader(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
return nil
}
params[key] = optionValue{value, optionHTTP}
return nil
}
}
func addParam(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
return nil
}
params[key] = optionValue{value, optionParam}
return nil
}
}
func addArg(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
return nil
}
params[key] = optionValue{value, optionArg}
return nil
}
}
func handleOptions(headers map[string]string, options []Option) error {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return err
}
}
}
for k, v := range params {
if v.Type == optionHTTP {
headers[k] = v.Value.(string)
}
}
return nil
}
func getRawParams(options []Option) (map[string]interface{}, error) {
// Option
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return nil, err
}
}
}
paramsm := map[string]interface{}{}
// Serialize
for k, v := range params {
if v.Type == optionParam {
vs := params[k]
paramsm[k] = vs.Value.(string)
}
}
return paramsm, nil
}
func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return nil, err
}
}
}
if val, ok := params[param]; ok {
return val.Value, nil
}
return defaultVal, nil
}
func isOptionSet(options []Option, option string) (bool, interface{}, error) {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return false, nil, err
}
}
}
if val, ok := params[option]; ok {
return true, val.Value, nil
}
return false, nil, nil
}
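
The same machinery from the caller's side, as a hedged sketch (placeholder names): each Option in the variadic list records one header or parameter into the map, and handleOptions/getRawParams serialize them onto the outgoing request.

package main

import (
    "log"
    "strings"
    "time"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    bucket, err := client.Bucket("my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    // The three options below become the Content-Type, Expires and
    // X-Oss-Meta-owner headers of the PUT request.
    err = bucket.PutObject("hello.txt", strings.NewReader("hello"),
        oss.ContentType("text/plain"),
        oss.Expires(time.Now().Add(24*time.Hour)),
        oss.Meta("owner", "vault"),
    )
    if err != nil {
        log.Fatal(err)
    }
}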

View File

@ -0,0 +1,105 @@
package oss
import "io"
// ProgressEventType defines transfer progress event type
type ProgressEventType int
const (
// TransferStartedEvent transfer started, set TotalBytes
TransferStartedEvent ProgressEventType = 1 + iota
// TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
TransferDataEvent
// TransferCompletedEvent transfer completed
TransferCompletedEvent
// TransferFailedEvent transfer encounters an error
TransferFailedEvent
)
// ProgressEvent defines progress event
type ProgressEvent struct {
ConsumedBytes int64
TotalBytes int64
EventType ProgressEventType
}
// ProgressListener listens for progress changes
type ProgressListener interface {
ProgressChanged(event *ProgressEvent)
}
// -------------------- Private --------------------
func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
return &ProgressEvent{
ConsumedBytes: consumed,
TotalBytes: total,
EventType: eventType}
}
// publishProgress delivers the event to the listener, if one is set
func publishProgress(listener ProgressListener, event *ProgressEvent) {
if listener != nil && event != nil {
listener.ProgressChanged(event)
}
}
type readerTracker struct {
completedBytes int64
}
type teeReader struct {
reader io.Reader
writer io.Writer
listener ProgressListener
consumedBytes int64
totalBytes int64
tracker *readerTracker
}
// TeeReader returns a Reader that writes to w what it reads from r.
// All reads from r performed through it are matched with
// corresponding writes to w. There is no internal buffering -
// the write must complete before the read completes.
// Any error encountered while writing is reported as a read error.
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.Reader {
return &teeReader{
reader: reader,
writer: writer,
listener: listener,
consumedBytes: 0,
totalBytes: totalBytes,
tracker: tracker,
}
}
func (t *teeReader) Read(p []byte) (n int, err error) {
n, err = t.reader.Read(p)
// Read encountered error
if err != nil && err != io.EOF {
event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
publishProgress(t.listener, event)
}
if n > 0 {
t.consumedBytes += int64(n)
// CRC
if t.writer != nil {
if n, err := t.writer.Write(p[:n]); err != nil {
return n, err
}
}
// Progress
if t.listener != nil {
event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
publishProgress(t.listener, event)
}
// Track
if t.tracker != nil {
t.tracker.completedBytes = t.consumedBytes
}
}
return
}
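
A sketch of wiring a listener through the Progress option (placeholder names); the teeReader above is what emits the TransferDataEvent callbacks while bytes flow:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// printListener logs coarse progress as transfer events arrive.
type printListener struct{}

func (l *printListener) ProgressChanged(event *oss.ProgressEvent) {
    switch event.EventType {
    case oss.TransferStartedEvent:
        fmt.Println("started:", event.TotalBytes, "bytes total")
    case oss.TransferDataEvent:
        fmt.Printf("%d/%d bytes\n", event.ConsumedBytes, event.TotalBytes)
    case oss.TransferCompletedEvent:
        fmt.Println("done")
    case oss.TransferFailedEvent:
        fmt.Println("failed at", event.ConsumedBytes, "bytes")
    }
}

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    bucket, err := client.Bucket("my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    payload := strings.NewReader(strings.Repeat("x", 1<<20))
    if err := bucket.PutObject("progress.bin", payload, oss.Progress(&printListener{})); err != nil {
        log.Fatal(err)
    }
}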

View File

@ -0,0 +1,24 @@
// +build !go1.7
package oss
import (
"net"
"net/http"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
httpTimeOut := conn.config.HTTPTimeout
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
if err != nil {
return nil, err
}
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
},
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
return transport
}

View File

@ -0,0 +1,25 @@
// +build go1.7
package oss
import (
"net"
"net/http"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
httpTimeOut := conn.config.HTTPTimeout
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
if err != nil {
return nil, err
}
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
},
IdleConnTimeout: httpTimeOut.IdleConnTimeout,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
return transport
}

vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
View File

@ -0,0 +1,450 @@
package oss
import (
"encoding/xml"
"net/url"
"time"
)
// ListBucketsResult defines the result object from ListBuckets request
type ListBucketsResult struct {
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
Prefix string `xml:"Prefix"` // The prefix in this query
Marker string `xml:"Marker"` // The marker filter
MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true.
IsTruncated bool `xml:"IsTruncated"` // Flag true means there are more buckets to return.
NextMarker string `xml:"NextMarker"` // The marker filter for the next list call
Owner Owner `xml:"Owner"` // The owner information
Buckets []BucketProperties `xml:"Buckets>Bucket"` // The bucket list
}
// BucketProperties defines bucket properties
type BucketProperties struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"` // Bucket name
Location string `xml:"Location"` // Bucket datacenter
CreationDate time.Time `xml:"CreationDate"` // Bucket create time
StorageClass string `xml:"StorageClass"` // Bucket storage class
}
// GetBucketACLResult defines GetBucketACL request's result
type GetBucketACLResult struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
Owner Owner `xml:"Owner"` // Bucket owner
}
// LifecycleConfiguration is the Bucket Lifecycle configuration
type LifecycleConfiguration struct {
XMLName xml.Name `xml:"LifecycleConfiguration"`
Rules []LifecycleRule `xml:"Rule"`
}
// LifecycleRule defines Lifecycle rules
type LifecycleRule struct {
XMLName xml.Name `xml:"Rule"`
ID string `xml:"ID"` // The rule ID
Prefix string `xml:"Prefix"` // The object key prefix
Status string `xml:"Status"` // The rule status (enabled or not)
Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
}
// LifecycleExpiration defines the rule's expiration property
type LifecycleExpiration struct {
XMLName xml.Name `xml:"Expiration"`
Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
Date time.Time `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date.
}
type lifecycleXML struct {
XMLName xml.Name `xml:"LifecycleConfiguration"`
Rules []lifecycleRule `xml:"Rule"`
}
type lifecycleRule struct {
XMLName xml.Name `xml:"Rule"`
ID string `xml:"ID"`
Prefix string `xml:"Prefix"`
Status string `xml:"Status"`
Expiration lifecycleExpiration `xml:"Expiration"`
}
type lifecycleExpiration struct {
XMLName xml.Name `xml:"Expiration"`
Days int `xml:"Days,omitempty"`
Date string `xml:"Date,omitempty"`
}
const expirationDateFormat = "2006-01-02T15:04:05.000Z"
func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
rs := []lifecycleRule{}
for _, rule := range rules {
r := lifecycleRule{}
r.ID = rule.ID
r.Prefix = rule.Prefix
r.Status = rule.Status
if rule.Expiration.Date.IsZero() {
r.Expiration.Days = rule.Expiration.Days
} else {
r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
}
rs = append(rs, r)
}
return rs
}
// BuildLifecycleRuleByDays builds a lifecycle rule with specified expiration days
func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
var statusStr = "Enabled"
if !status {
statusStr = "Disabled"
}
return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
Expiration: LifecycleExpiration{Days: days}}
}
// BuildLifecycleRuleByDate builds a lifecycle rule with specified expiration time.
func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
var statusStr = "Enabled"
if !status {
statusStr = "Disabled"
}
date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
Expiration: LifecycleExpiration{Date: date}}
}
// GetBucketLifecycleResult defines GetBucketLifecycle's result object
type GetBucketLifecycleResult LifecycleConfiguration
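
A hedged sketch of building rules with the two helpers above and applying them through the client's SetBucketLifecycle call (placeholder bucket name):

package main

import (
    "log"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    rules := []oss.LifecycleRule{
        // Expire objects under tmp/ seven days after their last modification.
        oss.BuildLifecycleRuleByDays("expire-tmp", "tmp/", true, 7),
        // Expire objects under logs/2017/ on a fixed date.
        oss.BuildLifecycleRuleByDate("expire-2017-logs", "logs/2017/", true, 2019, 1, 1),
    }
    if err := client.SetBucketLifecycle("my-bucket", rules); err != nil {
        log.Fatal(err)
    }
}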
// RefererXML defines Referer configuration
type RefererXML struct {
XMLName xml.Name `xml:"RefererConfiguration"`
AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer
RefererList []string `xml:"RefererList>Referer"` // Referer whitelist
}
// GetBucketRefererResult defines result object for GetBucketReferer request
type GetBucketRefererResult RefererXML
// LoggingXML defines logging configuration
type LoggingXML struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information
}
type loggingXMLEmpty struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
}
// LoggingEnabled defines the logging configuration information
type LoggingEnabled struct {
XMLName xml.Name `xml:"LoggingEnabled"`
TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files
TargetPrefix string `xml:"TargetPrefix"` // The log file prefix
}
// GetBucketLoggingResult defines the result from GetBucketLogging request
type GetBucketLoggingResult LoggingXML
// WebsiteXML defines Website configuration
type WebsiteXML struct {
XMLName xml.Name `xml:"WebsiteConfiguration"`
IndexDocument IndexDocument `xml:"IndexDocument"` // The index page
ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The error page
}
// IndexDocument defines the index page info
type IndexDocument struct {
XMLName xml.Name `xml:"IndexDocument"`
Suffix string `xml:"Suffix"` // The file name for the index page
}
// ErrorDocument defines the 404 error page info
type ErrorDocument struct {
XMLName xml.Name `xml:"ErrorDocument"`
Key string `xml:"Key"` // 404 error file name
}
// GetBucketWebsiteResult defines the result from GetBucketWebsite request.
type GetBucketWebsiteResult WebsiteXML
// CORSXML defines CORS configuration
type CORSXML struct {
XMLName xml.Name `xml:"CORSConfiguration"`
CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
}
// CORSRule defines CORS rules
type CORSRule struct {
XMLName xml.Name `xml:"CORSRule"`
AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. By default it's wildcard '*'
AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods
AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers
ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers
MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache ages in seconds
}
// GetBucketCORSResult defines the result from GetBucketCORS request.
type GetBucketCORSResult CORSXML
// GetBucketInfoResult defines the result from GetBucketInfo request.
type GetBucketInfoResult struct {
XMLName xml.Name `xml:"BucketInfo"`
BucketInfo BucketInfo `xml:"Bucket"`
}
// BucketInfo defines Bucket information
type BucketInfo struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"` // Bucket name
Location string `xml:"Location"` // Bucket datacenter
CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint
IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint
ACL string `xml:"AccessControlList>Grant"` // Bucket ACL
Owner Owner `xml:"Owner"` // Bucket owner
StorageClass string `xml:"StorageClass"` // Bucket storage class
}
// ListObjectsResult defines the result from ListObjects request
type ListObjectsResult struct {
XMLName xml.Name `xml:"ListBucketResult"`
Prefix string `xml:"Prefix"` // The object prefix
Marker string `xml:"Marker"` // The marker filter.
MaxKeys int `xml:"MaxKeys"` // Max keys to return
Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name
IsTruncated bool `xml:"IsTruncated"` // Truncation flag; false means all results have been returned
NextMarker string `xml:"NextMarker"` // The start point of the next query
Objects []ObjectProperties `xml:"Contents"` // Object list
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter
}
// ObjectProperties defines Object properties
type ObjectProperties struct {
XMLName xml.Name `xml:"Contents"`
Key string `xml:"Key"` // Object key
Type string `xml:"Type"` // Object type
Size int64 `xml:"Size"` // Object size
ETag string `xml:"ETag"` // Object ETag
Owner Owner `xml:"Owner"` // Object owner information
LastModified time.Time `xml:"LastModified"` // Object last modified time
StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
}
// Owner defines Bucket/Object's owner
type Owner struct {
XMLName xml.Name `xml:"Owner"`
ID string `xml:"ID"` // Owner ID
DisplayName string `xml:"DisplayName"` // Owner's display name
}
// CopyObjectResult defines result object of CopyObject
type CopyObjectResult struct {
XMLName xml.Name `xml:"CopyObjectResult"`
LastModified time.Time `xml:"LastModified"` // New object's last modified time.
ETag string `xml:"ETag"` // New object's ETag
}
// GetObjectACLResult defines result of GetObjectACL request
type GetObjectACLResult GetBucketACLResult
type deleteXML struct {
XMLName xml.Name `xml:"Delete"`
Objects []DeleteObject `xml:"Object"` // Objects to delete
Quiet bool `xml:"Quiet"` // Flag of quiet mode.
}
// DeleteObject defines the struct for deleting object
type DeleteObject struct {
XMLName xml.Name `xml:"Object"`
Key string `xml:"Key"` // Object name
}
// DeleteObjectsResult defines result of DeleteObjects request
type DeleteObjectsResult struct {
XMLName xml.Name `xml:"DeleteResult"`
DeletedObjects []string `xml:"Deleted>Key"` // Deleted object list
}
// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request
type InitiateMultipartUploadResult struct {
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
Bucket string `xml:"Bucket"` // Bucket name
Key string `xml:"Key"` // Object name to upload
UploadID string `xml:"UploadId"` // Generated UploadId
}
// UploadPart defines the upload/copy part
type UploadPart struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"` // Part number
ETag string `xml:"ETag"` // ETag value of the part's data
}
type uploadParts []UploadPart
func (slice uploadParts) Len() int {
return len(slice)
}
func (slice uploadParts) Less(i, j int) bool {
return slice[i].PartNumber < slice[j].PartNumber
}
func (slice uploadParts) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
// UploadPartCopyResult defines result object of multipart copy request.
type UploadPartCopyResult struct {
XMLName xml.Name `xml:"CopyPartResult"`
LastModified time.Time `xml:"LastModified"` // Last modified time
ETag string `xml:"ETag"` // ETag
}
type completeMultipartUploadXML struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Part []UploadPart `xml:"Part"`
}
// CompleteMultipartUploadResult defines result object of CompleteMultipartUploadRequest
type CompleteMultipartUploadResult struct {
XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
Location string `xml:"Location"` // Object URL
Bucket string `xml:"Bucket"` // Bucket name
ETag string `xml:"ETag"` // Object ETag
Key string `xml:"Key"` // Object name
}
// ListUploadedPartsResult defines result object of ListUploadedParts
type ListUploadedPartsResult struct {
XMLName xml.Name `xml:"ListPartsResult"`
Bucket string `xml:"Bucket"` // Bucket name
Key string `xml:"Key"` // Object name
UploadID string `xml:"UploadId"` // Upload ID
NextPartNumberMarker string `xml:"NextPartNumberMarker"` // Next part number
MaxParts int `xml:"MaxParts"` // Max parts count
IsTruncated bool `xml:"IsTruncated"` // Truncation flag; false means all entries have been returned.
UploadedParts []UploadedPart `xml:"Part"` // Uploaded parts
}
// UploadedPart defines uploaded part
type UploadedPart struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"` // Part number
LastModified time.Time `xml:"LastModified"` // Last modified time
ETag string `xml:"ETag"` // ETag cache
Size int `xml:"Size"` // Part size
}
// ListMultipartUploadResult defines result object of ListMultipartUpload
type ListMultipartUploadResult struct {
XMLName xml.Name `xml:"ListMultipartUploadsResult"`
Bucket string `xml:"Bucket"` // Bucket name
Delimiter string `xml:"Delimiter"` // Delimiter for grouping object.
Prefix string `xml:"Prefix"` // Object prefix
KeyMarker string `xml:"KeyMarker"` // Object key marker
UploadIDMarker string `xml:"UploadIdMarker"` // UploadId marker
NextKeyMarker string `xml:"NextKeyMarker"` // Next key marker, if not all entries returned.
NextUploadIDMarker string `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries returned.
MaxUploads int `xml:"MaxUploads"` // Max uploads to return
IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries are returned.
Uploads []UncompletedUpload `xml:"Upload"` // Ongoing uploads (not completed, not aborted)
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Common prefixes list.
}
// UncompletedUpload structure wraps an uncompleted upload task
type UncompletedUpload struct {
XMLName xml.Name `xml:"Upload"`
Key string `xml:"Key"` // Object name
UploadID string `xml:"UploadId"` // The UploadId
Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z
}
// decodeDeleteObjectsResult decodes deleting objects result in URL encoding
func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
var err error
for i := 0; i < len(result.DeletedObjects); i++ {
result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i])
if err != nil {
return err
}
}
return nil
}
// decodeListObjectsResult decodes list objects result in URL encoding
func decodeListObjectsResult(result *ListObjectsResult) error {
var err error
result.Prefix, err = url.QueryUnescape(result.Prefix)
if err != nil {
return err
}
result.Marker, err = url.QueryUnescape(result.Marker)
if err != nil {
return err
}
result.Delimiter, err = url.QueryUnescape(result.Delimiter)
if err != nil {
return err
}
result.NextMarker, err = url.QueryUnescape(result.NextMarker)
if err != nil {
return err
}
for i := 0; i < len(result.Objects); i++ {
result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
if err != nil {
return err
}
}
for i := 0; i < len(result.CommonPrefixes); i++ {
result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
if err != nil {
return err
}
}
return nil
}
// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding
func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
var err error
result.Prefix, err = url.QueryUnescape(result.Prefix)
if err != nil {
return err
}
result.Delimiter, err = url.QueryUnescape(result.Delimiter)
if err != nil {
return err
}
result.KeyMarker, err = url.QueryUnescape(result.KeyMarker)
if err != nil {
return err
}
result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker)
if err != nil {
return err
}
for i := 0; i < len(result.Uploads); i++ {
result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key)
if err != nil {
return err
}
}
for i := 0; i < len(result.CommonPrefixes); i++ {
result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
if err != nil {
return err
}
}
return nil
}
// createBucketConfiguration defines the configuration for creating a bucket.
type createBucketConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration"`
StorageClass StorageClassType `xml:"StorageClass,omitempty"`
}

View File

@ -0,0 +1,484 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"os"
"time"
)
// UploadFile uploads a file using multipart upload.
//
// objectKey the object name.
// filePath the local file path to upload.
// partSize the part size in bytes.
// options the options for uploading the object.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
if partSize < MinPartSize || partSize > MaxPartSize {
return errors.New("oss: part size invalid range (1024KB, 5GB]")
}
cpConf, err := getCpConfig(options, filePath)
if err != nil {
return err
}
routines := getRoutines(options)
if cpConf.IsEnable {
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
}
return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
}
// ----- concurrent upload without checkpoint -----
// getCpConfig gets checkpoint configuration
func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
cpc := &cpConfig{}
cpcOpt, err := findOption(options, checkpointConfig, nil)
if err != nil || cpcOpt == nil {
return cpc, err
}
cpc = cpcOpt.(*cpConfig)
if cpc.IsEnable && cpc.FilePath == "" {
cpc.FilePath = filePath + CheckpointFileSuffix
}
return cpc, nil
}
// getRoutines gets the routine count. By default it's 1.
func getRoutines(options []Option) int {
rtnOpt, err := findOption(options, routineNum, nil)
if err != nil || rtnOpt == nil {
return 1
}
rs := rtnOpt.(int)
if rs < 1 {
rs = 1
} else if rs > 100 {
rs = 100
}
return rs
}
// getProgressListener gets the progress callback
func getProgressListener(options []Option) ProgressListener {
isSet, listener, _ := isOptionSet(options, progressListener)
if !isSet {
return nil
}
return listener.(ProgressListener)
}
// uploadPartHook is a hook for testing purposes
type uploadPartHook func(id int, chunk FileChunk) error
var uploadPartHooker uploadPartHook = defaultUploadPart
func defaultUploadPart(id int, chunk FileChunk) error {
return nil
}
// workerArg defines worker argument structure
type workerArg struct {
bucket *Bucket
filePath string
imur InitiateMultipartUploadResult
hook uploadPartHook
}
// worker is the worker goroutine function
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(id, chunk); err != nil {
failed <- err
break
}
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
if err != nil {
failed <- err
break
}
select {
case <-die:
return
default:
}
results <- part
}
}
// scheduler feeds the chunks into the jobs channel and closes it when done
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
for _, chunk := range chunks {
jobs <- chunk
}
close(jobs)
}
func getTotalBytes(chunks []FileChunk) int64 {
var tb int64
for _, chunk := range chunks {
tb += chunk.Size
}
return tb
}
// uploadFile is a concurrent upload, without checkpoint
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
listener := getProgressListener(options)
chunks, err := SplitFileByPartSize(filePath, partSize)
if err != nil {
return err
}
// Initialize the multipart upload
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
if err != nil {
return err
}
jobs := make(chan FileChunk, len(chunks))
results := make(chan UploadPart, len(chunks))
failed := make(chan error)
die := make(chan bool)
var completedBytes int64
totalBytes := getTotalBytes(chunks)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
publishProgress(listener, event)
// Start the worker goroutines
arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
// Schedule the jobs
go scheduler(jobs, chunks)
// Wait for the upload to finish
completed := 0
parts := make([]UploadPart, len(chunks))
for completed < len(chunks) {
select {
case part := <-results:
completed++
parts[part.PartNumber-1] = part
completedBytes += chunks[part.PartNumber-1].Size
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
bucket.AbortMultipartUpload(imur)
return err
}
if completed >= len(chunks) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
publishProgress(listener, event)
// Complete the multipart upload
_, err = bucket.CompleteMultipartUpload(imur, parts)
if err != nil {
bucket.AbortMultipartUpload(imur)
return err
}
return nil
}
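
A usage sketch tying this together (placeholder names): 5MB parts, five workers, and a checkpoint file that defaults to the source path plus CheckpointFileSuffix when the second Checkpoint argument is empty:

package main

import (
    "log"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "accessKeyID", "accessKeySecret")
    if err != nil {
        log.Fatal(err)
    }
    bucket, err := client.Bucket("my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    err = bucket.UploadFile("backups/vault.snap", "/var/backups/vault.snap", 5<<20,
        oss.Routines(5), oss.Checkpoint(true, ""))
    if err != nil {
        log.Fatal(err)
    }
}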
// ----- concurrent upload with checkpoint -----
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
type uploadCheckpoint struct {
Magic string // Magic
MD5 string // Checkpoint file content's MD5
FilePath string // Local file path
FileStat cpStat // File state
ObjectKey string // Key
UploadID string // Upload ID
Parts []cpPart // All parts of the local file
}
type cpStat struct {
Size int64 // File size
LastModified time.Time // File's last modified time
MD5 string // Local file's MD5
}
type cpPart struct {
Chunk FileChunk // File chunk
Part UploadPart // Uploaded part
IsCompleted bool // Upload complete flag
}
// isValid checks that the checkpoint data is consistent and the local file has not been updated since the checkpoint was taken.
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
// Compare the CP's magic number and MD5.
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
return false, nil
}
// Make sure the local file has not been updated.
fd, err := os.Open(filePath)
if err != nil {
return false, err
}
defer fd.Close()
st, err := fd.Stat()
if err != nil {
return false, err
}
md, err := calcFileMD5(filePath)
if err != nil {
return false, err
}
// Compare the file size, file's last modified time and file's MD5
if cp.FileStat.Size != st.Size() ||
cp.FileStat.LastModified != st.ModTime() ||
cp.FileStat.MD5 != md {
return false, nil
}
return true, nil
}
// load loads from the file
func (cp *uploadCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
err = json.Unmarshal(contents, cp)
return err
}
// dump dumps to the local file
func (cp *uploadCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate the MD5 over the checkpoint JSON with the MD5 field blanked
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
return err
}
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialize again, this time with the MD5 embedded
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// updatePart updates the part status
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
cp.Parts[part.PartNumber-1].Part = part
cp.Parts[part.PartNumber-1].IsCompleted = true
}
// todoParts returns unfinished parts
func (cp *uploadCheckpoint) todoParts() []FileChunk {
fcs := []FileChunk{}
for _, part := range cp.Parts {
if !part.IsCompleted {
fcs = append(fcs, part.Chunk)
}
}
return fcs
}
// allParts returns all parts
func (cp *uploadCheckpoint) allParts() []UploadPart {
ps := []UploadPart{}
for _, part := range cp.Parts {
ps = append(ps, part.Part)
}
return ps
}
// getCompletedBytes returns completed bytes count
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for _, part := range cp.Parts {
if part.IsCompleted {
completedBytes += part.Chunk.Size
}
}
return completedBytes
}
// calcFileMD5 calculates the MD5 for the specified local file.
// It is left as a stub in this vendored version, so the file-MD5
// comparison in isValid is effectively a no-op; only size and
// modification time guard against a changed local file.
func calcFileMD5(filePath string) (string, error) {
return "", nil
}
// prepare initializes the multipart upload
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
// Checkpoint metadata
cp.Magic = uploadCpMagic
cp.FilePath = filePath
cp.ObjectKey = objectKey
// Local file
fd, err := os.Open(filePath)
if err != nil {
return err
}
defer fd.Close()
st, err := fd.Stat()
if err != nil {
return err
}
cp.FileStat.Size = st.Size()
cp.FileStat.LastModified = st.ModTime()
md, err := calcFileMD5(filePath)
if err != nil {
return err
}
cp.FileStat.MD5 = md
// Chunks
parts, err := SplitFileByPartSize(filePath, partSize)
if err != nil {
return err
}
cp.Parts = make([]cpPart, len(parts))
for i, part := range parts {
cp.Parts[i].Chunk = part
cp.Parts[i].IsCompleted = false
}
// Initiate the multipart upload
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
if err != nil {
return err
}
cp.UploadID = imur.UploadID
return nil
}
// complete completes the multipart upload and deletes the local CP files
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
Key: cp.ObjectKey, UploadID: cp.UploadID}
_, err := bucket.CompleteMultipartUpload(imur, parts)
if err != nil {
return err
}
os.Remove(cpFilePath)
return nil
}
// uploadFileWithCp handles concurrent upload with checkpoint
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
listener := getProgressListener(options)
// Load CP data
ucp := uploadCheckpoint{}
err := ucp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// If loading failed or the checkpoint data is invalid, start over.
valid, err := ucp.isValid(filePath)
if err != nil || !valid {
if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
return err
}
os.Remove(cpFilePath)
}
chunks := ucp.todoParts()
imur := InitiateMultipartUploadResult{
Bucket: bucket.BucketName,
Key: objectKey,
UploadID: ucp.UploadID}
jobs := make(chan FileChunk, len(chunks))
results := make(chan UploadPart, len(chunks))
failed := make(chan error)
die := make(chan bool)
completedBytes := ucp.getCompletedBytes()
event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
publishProgress(listener, event)
// Start the workers
arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
// Schedule jobs
go scheduler(jobs, chunks)
// Wait for the upload to finish
completed := 0
for completed < len(chunks) {
select {
case part := <-results:
completed++
ucp.updatePart(part)
ucp.dump(cpFilePath)
completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size)
publishProgress(listener, event)
return err
}
if completed >= len(chunks) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
publishProgress(listener, event)
// Complete the multipart upload
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
return err
}

265
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go generated vendored Normal file
View File

@ -0,0 +1,265 @@
package oss
import (
"bytes"
"errors"
"fmt"
"hash/crc64"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"time"
)
// userAgent builds the User-Agent string, which carries the SDK
// version, OS information and Go version.
var userAgent = func() string {
sys := getSysInfo()
return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
sys.release, sys.machine, runtime.Version())
}()
type sysInfo struct {
name string // OS name such as windows/Linux
release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
machine string // CPU type amd64/x86_64
}
// getSysInfo returns the OS name, OS release and CPU architecture,
// preferring uname output where available.
func getSysInfo() sysInfo {
name := runtime.GOOS
release := "-"
machine := runtime.GOARCH
if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
name = string(bytes.TrimSpace(out))
}
if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
release = string(bytes.TrimSpace(out))
}
if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
machine = string(bytes.TrimSpace(out))
}
return sysInfo{name: name, release: release, machine: machine}
}
// unpackedRange is the parsed form of an HTTP byte range header value
type unpackedRange struct {
hasStart bool // Flag indicates if the start point is specified
hasEnd bool // Flag indicates if the end point is specified
start int64 // Start point
end int64 // End point
}
// invalidRangeError returns invalid range error
func invalidRangeError(r string) error {
return fmt.Errorf("InvalidRange %s", r)
}
// parseRange parses the supported range styles, such as bytes=M-N, bytes=M- and bytes=-N
func parseRange(normalizedRange string) (*unpackedRange, error) {
var err error
hasStart := false
hasEnd := false
var start int64
var end int64
// Expect the form bytes=M-N
nrSlice := strings.Split(normalizedRange, "=")
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
return nil, invalidRangeError(normalizedRange)
}
// For multi-range requests (bytes=M-N,X-Y), only the first range is used
rSlice := strings.Split(nrSlice[1], ",")
rStr := rSlice[0]
if strings.HasSuffix(rStr, "-") { // M-
startStr := rStr[:len(rStr)-1]
start, err = strconv.ParseInt(startStr, 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
hasStart = true
} else if strings.HasPrefix(rStr, "-") { // -N
endStr := rStr[1:]
end, err = strconv.ParseInt(endStr, 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
if end == 0 { // -0
return nil, invalidRangeError(normalizedRange)
}
hasEnd = true
} else { // M-N
valSlice := strings.Split(rStr, "-")
if len(valSlice) != 2 {
return nil, invalidRangeError(normalizedRange)
}
start, err = strconv.ParseInt(valSlice[0], 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
hasStart = true
end, err = strconv.ParseInt(valSlice[1], 10, 64)
if err != nil {
return nil, invalidRangeError(normalizedRange)
}
hasEnd = true
}
return &unpackedRange{hasStart, hasEnd, start, end}, nil
}
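// Illustrative inputs and results (values are hypothetical):
//   parseRange("bytes=0-99")  -> {hasStart: true,  hasEnd: true,  start: 0, end: 99}
//   parseRange("bytes=100-")  -> {hasStart: true,  hasEnd: false, start: 100}
//   parseRange("bytes=-500")  -> {hasStart: false, hasEnd: true,  end: 500}  // last 500 bytes
//   parseRange("lines=0-9")   -> InvalidRange error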
// adjustRange clamps the parsed range to the object size and returns half-open [start, end) offsets; invalid ranges fall back to the whole object
func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
if ur == nil {
return 0, size
}
if ur.hasStart && ur.hasEnd {
start = ur.start
end = ur.end + 1
if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end {
start = 0
end = size
}
} else if ur.hasStart {
start = ur.start
end = size
if ur.start < 0 || ur.start >= size {
start = 0
}
} else if ur.hasEnd {
start = size - ur.end
end = size
if ur.end < 0 || ur.end > size {
start = 0
end = size
}
}
return
}
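// Illustrative behavior for a 1000-byte object (values are hypothetical):
//   bytes=0-99   -> start=0,   end=100   (end offset is exclusive)
//   bytes=900-   -> start=900, end=1000
//   bytes=-100   -> start=900, end=1000  (suffix range: the last 100 bytes)
//   bytes=0-9999 -> start=0,   end=1000  (out of bounds: whole object)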
// GetNowSec returns the current time as a Unix time, the number of seconds elapsed since January 1, 1970 UTC.
func GetNowSec() int64 {
return time.Now().Unix()
}
// GetNowNanoSec returns the current time as a Unix time, the number of
// nanoseconds elapsed since January 1, 1970 UTC. The result is undefined
// if the Unix time in nanoseconds cannot be represented by an int64.
func GetNowNanoSec() int64 {
return time.Now().UnixNano()
}
// GetNowGMT gets the current time in GMT format.
func GetNowGMT() string {
return time.Now().UTC().Format(http.TimeFormat)
}
// FileChunk is the file chunk definition
type FileChunk struct {
Number int // Chunk number
Offset int64 // Chunk offset
Size int64 // Chunk size.
}
// SplitFileByPartNum splits a file into the given number of chunks; the final chunk absorbs the remainder.
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
if chunkNum <= 0 || chunkNum > 10000 {
return nil, errors.New("chunkNum invalid")
}
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return nil, err
}
if int64(chunkNum) > stat.Size() {
return nil, errors.New("oss: chunkNum invalid")
}
var chunks []FileChunk
var chunk = FileChunk{}
var chunkN = (int64)(chunkNum)
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * (stat.Size() / chunkN)
if i == chunkN-1 {
chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
} else {
chunk.Size = stat.Size() / chunkN
}
chunks = append(chunks, chunk)
}
return chunks, nil
}
// SplitFileByPartSize splits a file into chunks of the given size; a smaller tail chunk holds any remainder.
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
if chunkSize <= 0 {
return nil, errors.New("chunkSize invalid")
}
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return nil, err
}
var chunkN = stat.Size() / chunkSize
if chunkN >= 10000 {
return nil, errors.New("Too many parts, please increase part size")
}
var chunks []FileChunk
var chunk = FileChunk{}
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * chunkSize
chunk.Size = chunkSize
chunks = append(chunks, chunk)
}
if stat.Size()%chunkSize > 0 {
chunk.Number = len(chunks) + 1
chunk.Offset = int64(len(chunks)) * chunkSize
chunk.Size = stat.Size() % chunkSize
chunks = append(chunks, chunk)
}
return chunks, nil
}
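// Illustrative chunking (hypothetical file): a 10 MiB file split with a
// chunkSize of 4 MiB yields two full chunks plus a 2 MiB tail:
//   {Number: 1, Offset: 0,       Size: 4194304}
//   {Number: 2, Offset: 4194304, Size: 4194304}
//   {Number: 3, Offset: 8388608, Size: 2097152}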
// GetPartEnd returns the inclusive end offset of a part that starts at begin
func GetPartEnd(begin int64, total int64, per int64) int64 {
if begin+per > total {
return total - 1
}
return begin + per - 1
}
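// For example, with total=10 and per=4: GetPartEnd(0, 10, 4) == 3,
// GetPartEnd(4, 10, 4) == 7 and GetPartEnd(8, 10, 4) == 9.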
// crcTable returns the CRC-64 table constructed from the ECMA polynomial
var crcTable = func() *crc64.Table {
return crc64.MakeTable(crc64.ECMA)
}

View File

@ -0,0 +1,66 @@
---
layout: "docs"
page_title: "Alicloud OSS - Storage Backends - Configuration"
sidebar_current: "docs-configuration-storage-alicloudoss"
description: |-
The Alicloud OSS storage backend is used to persist Vault's data in
an Alicloud OSS bucket.
---
# Alicloud OSS Storage Backend
The Alicloud OSS storage backend is used to persist Vault's data in
an [Alicloud OSS][alicloudoss] bucket.
- **No High Availability** – the Alicloud OSS storage backend does not support
high availability.
- **Community Supported** – the Alicloud OSS storage backend is supported by the
community. While it has undergone review by HashiCorp employees, they may not
be as knowledgeable about the technology. If you encounter problems with it,
you may be referred to the original author.
```hcl
storage "alicloudoss" {
access_key = "abcd1234"
secret_key = "defg5678"
endpoint = "oss-us-west-1.aliyuncs.com"
bucket = "my-bucket"
}
```
## `alicloudoss` Parameters
- `bucket` `(string: <required>)` – Specifies the name of the OSS bucket. This
can also be provided via the environment variable `ALICLOUD_OSS_BUCKET`.
- `endpoint` `(string: <required>)` – Specifies the OSS endpoint. This can also
be provided via the environment variable `ALICLOUD_OSS_ENDPOINT`.
The following settings are used for authenticating to Alicloud.
- `access_key` `(string: <required>)` – Specifies the Alicloud access key. This
can also be provided via the environment variable `ALICLOUD_ACCESS_KEY`.
- `secret_key` `(string: <required>)` – Specifies the Alicloud secret key. This
can also be provided via the environment variable `ALICLOUD_SECRET_KEY`.
- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent
requests to Alicloud OSS.
## `alicloudoss` Examples
### Default Example
This example shows using Alicloud OSS as a storage backend.
```hcl
storage "alicloudoss" {
access_key = "abcd1234"
secret_key = "defg5678"
endpoint = "oss-us-west-1.aliyuncs.com"
bucket = "my-bucket"
}
```
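
### Environment-based Configuration Example

The connection settings can also come from the environment instead of the
configuration file, which keeps credentials out of Vault's config on disk.
A minimal sketch (the exported values below are placeholders):

```shell
$ export ALICLOUD_ACCESS_KEY="abcd1234"
$ export ALICLOUD_SECRET_KEY="defg5678"
$ export ALICLOUD_OSS_ENDPOINT="oss-us-west-1.aliyuncs.com"
$ export ALICLOUD_OSS_BUCKET="my-bucket"
```

```hcl
storage "alicloudoss" {}
```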
[alicloudoss]: https://www.alibabacloud.com/product/oss