From 175e3cc354f294800d63e3a896f5609582a2fe3a Mon Sep 17 00:00:00 2001
From: Tobias Haag
Date: Wed, 24 Feb 2016 12:14:23 +0000
Subject: [PATCH] added Azure backend support

updated Godeps
added website docs
updated vendor
---
 Godeps/Godeps.json                              |   10 +-
 physical/azure.go                               |  144 +++
 physical/azure_test.go                          |   41 +
 physical/physical.go                            |    1 +
 .../github.com/Azure/azure-sdk-for-go/LICENSE   |  202 ++++
 .../Azure/azure-sdk-for-go/storage/blob.go      | 1002 +++++++++++++++++
 .../azure-sdk-for-go/storage/blob_test.go       |  768 +++++++++++++
 .../Azure/azure-sdk-for-go/storage/client.go    |  392 +++++++
 .../azure-sdk-for-go/storage/client_test.go     |  156 +++
 .../Azure/azure-sdk-for-go/storage/file.go      |   91 ++
 .../azure-sdk-for-go/storage/file_test.go       |   63 ++
 .../Azure/azure-sdk-for-go/storage/queue.go     |  306 +++++
 .../azure-sdk-for-go/storage/queue_test.go      |  132 +++
 .../Azure/azure-sdk-for-go/storage/util.go      |   71 ++
 .../azure-sdk-for-go/storage/util_test.go       |   69 ++
 vendor/vendor.json                              |   13 +
 website/source/docs/config/index.html.md        |   13 +-
 17 files changed, 3472 insertions(+), 2 deletions(-)
 create mode 100644 physical/azure.go
 create mode 100644 physical/azure_test.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/LICENSE
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go
 create mode 100644 vendor/vendor.json

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 30096718d..fb0a34a45 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -483,6 +483,14 @@
 		{
 			"ImportPath": "gopkg.in/inf.v0",
 			"Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
-		}
+		},
+		{
+			"ImportPath": "gopkg.in/yaml.v2",
+			"Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
+		},
+		{
+			"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
+			"Rev": "fd432c66ed19fa59ea411cdde8ca3a239745bcc4"
+		}
 	]
 }
diff --git a/physical/azure.go b/physical/azure.go
new file mode 100644
index 000000000..67b7131f8
--- /dev/null
+++ b/physical/azure.go
@@ -0,0 +1,144 @@
+package physical
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/storage"
+	"github.com/armon/go-metrics"
+)
+
+// MaxBlobSize is the largest value size this backend currently
+// accepts: a single 4 MiB block.
+var MaxBlobSize = 1024 * 1024 * 4
+
+// AzureBackend is a physical backend that stores data
+// within an Azure blob container.
+type AzureBackend struct {
+	container string
+	client    storage.BlobStorageClient
+}
+
+// newAzureBackend constructs an Azure backend using a pre-existing
+// blob container. Credentials can be provided in the configuration
+// or sourced from the environment.
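+//
+// A minimal configuration stanza for this backend might look like the
+// following sketch. The key names are exactly the ones this factory
+// reads from conf; the values are placeholders, and the surrounding
+// syntax follows Vault's other physical backends:
+//
+//	backend "azure" {
+//	  container   = "vault-data"
+//	  accountName = "mystorageaccount"
+//	  accountKey  = "base64-encoded-account-key"
+//	}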
+func newAzureBackend(conf map[string]string) (Backend, error) {
+	container := os.Getenv("AZURE_BLOB_CONTAINER")
+	if container == "" {
+		container = conf["container"]
+		if container == "" {
+			return nil, fmt.Errorf("'container' must be set")
+		}
+	}
+
+	accountName := os.Getenv("AZURE_ACCOUNT_NAME")
+	if accountName == "" {
+		accountName = conf["accountName"]
+		if accountName == "" {
+			return nil, fmt.Errorf("'accountName' must be set")
+		}
+	}
+
+	accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
+	if accountKey == "" {
+		accountKey = conf["accountKey"]
+		if accountKey == "" {
+			return nil, fmt.Errorf("'accountKey' must be set")
+		}
+	}
+
+	client, err := storage.NewBasicClient(accountName, accountKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create Azure client: %v", err)
+	}
+
+	// Ensure the container exists, and surface the error instead of
+	// silently discarding it.
+	if _, err := client.GetBlobService().CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate); err != nil {
+		return nil, fmt.Errorf("failed to create container '%s': %v", container, err)
+	}
+
+	a := &AzureBackend{
+		container: container,
+		client:    client.GetBlobService(),
+	}
+	return a, nil
+}
+
+// Put is used to insert or update an entry
+func (a *AzureBackend) Put(entry *Entry) error {
+	defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
+
+	if len(entry.Value) >= MaxBlobSize {
+		return fmt.Errorf("value is bigger than the current supported limit of 4 MiB")
+	}
+
+	blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
+	blocks := make([]storage.Block, 1)
+	blocks[0] = storage.Block{ID: blockID, Status: storage.BlockStatusLatest}
+
+	// PutBlock stages the data; PutBlockList commits it. Check both
+	// errors rather than letting the first be overwritten.
+	if err := a.client.PutBlock(a.container, entry.Key, blockID, entry.Value); err != nil {
+		return err
+	}
+
+	return a.client.PutBlockList(a.container, entry.Key, blocks)
+}
+
+// Get is used to fetch an entry
+func (a *AzureBackend) Get(key string) (*Entry, error) {
+	defer metrics.MeasureSince([]string{"azure", "get"}, time.Now())
+
+	exists, err := a.client.BlobExists(a.container, key)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, nil
+	}
+
+	reader, err := a.client.GetBlob(a.container, key)
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	data, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	ent := &Entry{
+		Key:   key,
+		Value: data,
+	}
+	return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (a *AzureBackend) Delete(key string) error {
+	defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
+	_, err := a.client.DeleteBlobIfExists(a.container, key)
+	return err
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (a *AzureBackend) List(prefix string) ([]string, error) {
+	defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
+
+	list, err := a.client.ListBlobs(a.container, storage.ListBlobsParameters{Prefix: prefix})
+	if err != nil {
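+		// Listing failed; return the SDK error to the caller unchanged.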
+ return nil, err + } + + keys := []string{} + for _, blob := range list.Blobs { + key := strings.TrimPrefix(blob.Name, prefix) + if i := strings.Index(key, "/"); i == -1 { + keys = append(keys, key) + } else { + keys = appendIfMissing(keys, key[:i+1]) + } + } + + sort.Strings(keys) + return keys, nil +} diff --git a/physical/azure_test.go b/physical/azure_test.go new file mode 100644 index 000000000..3721696b2 --- /dev/null +++ b/physical/azure_test.go @@ -0,0 +1,41 @@ +package physical + +import ( + "fmt" + "github.com/Azure/azure-sdk-for-go/storage" + "os" + "testing" + "time" +) + +func TestAzureBackend(t *testing.T) { + if os.Getenv("AZURE_ACCOUNT_NAME") == "" || + os.Getenv("AZURE_ACCOUNT_KEY") == "" { + t.SkipNow() + } + + accountName := os.Getenv("AZURE_ACCOUNT_NAME") + accountKey := os.Getenv("AZURE_ACCOUNT_KEY") + + ts := time.Now().UnixNano() + container := fmt.Sprintf("vault-test-%d", ts) + + cleanupClient, _ := storage.NewBasicClient(accountName, accountKey) + + backend, err := NewBackend("azure", map[string]string{ + "container": container, + "accountName": accountName, + "accountKey": accountKey, + }) + + defer func() { + cleanupClient.GetBlobService().DeleteContainerIfExists(container) + }() + + if err != nil { + t.Fatalf("err: %s", err) + } + + testBackend(t, backend) + testBackend_ListPrefix(t, backend) +} diff --git a/physical/physical.go b/physical/physical.go index c0da2a839..8f28f00a7 100644 --- a/physical/physical.go +++ b/physical/physical.go @@ -85,6 +85,7 @@ var BuiltinBackends = map[string]Factory{ "zookeeper": newZookeeperBackend, "file": newFileBackend, "s3": newS3Backend, + "azure": newAzureBackend, "dynamodb": newDynamoDBBackend, "etcd": newEtcdBackend, "mysql": newMySQLBackend, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go new file mode 100644 index 000000000..dc7bfdca0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -0,0 +1,1002 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client +} + +// A Container is an entry in ContainerListResponse. +type Container struct { + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. 
+type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + // TODO (ahmetalpbalkan) remaining fields +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// A Blob is an entry in BlobListResponse. +type Blob struct { + Name string `xml:"Name"` + Properties BlobProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// BlobProperties contains various properties of a blob +// returned in various endpoints like ListBlobs or GetBlobProperties. +type BlobProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type"` + ContentEncoding string `xml:"Content-Encoding"` + BlobType BlobType `xml:"x-ms-blob-blob-type"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime string `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// BlobType defines the type of the Azure Blob. +type BlobType string + +// Types of page blobs +const ( + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" + BlobTypeAppend BlobType = "AppendBlob" +) + +// PageWriteType defines the type updates that are going to be +// done on the page blob. +type PageWriteType string + +// Types of operations on page blobs +const ( + PageWriteTypeUpdate PageWriteType = "update" + PageWriteTypeClear PageWriteType = "clear" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. +type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 4 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// GetPageRangesResponse contains the reponse fields from +// Get Page Ranges call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// CreateContainer creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx +func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { + resp, err := b.createContainer(name, access) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateContainerIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. +func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { + resp, err := b.createContainer(name, access) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { + verb := "PUT" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + if access != "" { + headers["x-ms-blob-public-access"] = string(access) + } + return b.client.exec(verb, uri, headers, nil) +} + +// ContainerExists returns true if a container with given name exists +// on the storage account, otherwise returns false. +func (b BlobStorageClient) ContainerExists(name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// DeleteContainer deletes the container with given name on the storage +// account. If the container does not exist returns error. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainer(name string) error { + resp, err := b.deleteContainer(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteContainerIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { + resp, err := b.deleteContainer(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + return b.client.exec(verb, uri, headers, nil) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) + headers := b.client.getStandardHeaders() + + var out BlobListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// BlobExists returns true if a blob with given name exists on the specified +// container of the storage account. +func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetBlobURL gets the canonical URL to the blob with the specified name in the +// specified container. This method does not create a publicly accessible URL if +// the blob or container is private and this method does not check if the blob +// exists. +func (b BlobStorageClient) GetBlobURL(container, name string) string { + if container == "" { + container = "$root" + } + return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) +} + +// GetBlob returns a stream to read the blob. Caller must call Close() the +// reader to close on the underlying connection. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, "") + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + return resp.body, nil +} + +// GetBlobRange reads the specified range of a blob to a stream. The bytesRange +// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, bytesRange) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { + return nil, err + } + return resp.body, nil +} + +func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) { + verb := "GET" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + if bytesRange != "" { + headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) + } + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + return resp, err +} + +// GetBlobProperties provides various information about the specified +// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var contentLength int64 + contentLengthStr := resp.headers.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return nil, err + } + } + + var sequenceNum int64 + sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + if sequenceNumStr != "" { + sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) + if err != nil { + return nil, err + } + } + + return &BlobProperties{ + LastModified: resp.headers.Get("Last-Modified"), + Etag: resp.headers.Get("Etag"), + ContentMD5: resp.headers.Get("Content-MD5"), + ContentLength: contentLength, + ContentEncoding: resp.headers.Get("Content-Encoding"), + SequenceNumber: sequenceNum, + CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), + CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), + CopyID: resp.headers.Get("x-ms-copy-id"), + CopyProgress: resp.headers.Get("x-ms-copy-progress"), + CopySource: resp.headers.Get("x-ms-copy-source"), + CopyStatus: resp.headers.Get("x-ms-copy-status"), + BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), + }, nil +} + +// SetBlobMetadata replaces the metadata for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string) error { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetBlobMetadata returns all user-defined metadata for the specified blob. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + metadata := make(map[string]string) + for k, v := range resp.headers { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlob(container, name string) error { + return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 64 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%d", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx +func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { + blockListXML := prepareBlockListRequest(blocks) + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) + headers := b.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + + resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { + params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + var out BlockListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutPage writes a range of pages to a page blob or clears the given range. +// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned +// with 512-byte boundaries and chunk must be of size multiplies by 512. +// +// See https://msdn.microsoft.com/en-us/library/ee691975.aspx +func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = string(writeType) + headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) + + var contentLength int64 + var data io.Reader + if writeType == PageWriteTypeClear { + contentLength = 0 + data = bytes.NewReader([]byte{}) + } else { + contentLength = int64(len(chunk)) + data = bytes.NewReader(chunk) + } + headers["Content-Length"] = fmt.Sprintf("%v", contentLength) + + resp, err := b.client.exec("PUT", uri, headers, data) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetPageRanges returns the list of valid page ranges for a page blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) + headers := b.client.getStandardHeaders() + + var out GetPageRangesResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// AppendBlock appends a block to an append blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx +func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CopyBlob starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx +func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { + copyID, err := b.startBlobCopy(container, name, sourceBlob) + if err != nil { + return err + } + + return b.waitForBlobCopy(container, name, copyID) +} + +func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return "", err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error { + for { + props, err := b.GetBlobProperties(container, name) + if err != nil { + return err + } + + if props.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch props.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) + } + } +} + +// DeleteBlob deletes the given blob from the specified container. +// If the blob does not exists at the time of the Delete Blob operation, it +// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlob(container, name string) error { + resp, err := b.deleteBlob(container, name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteBlobIfExists deletes the given blob from the specified container If the +// blob is deleted with this call, returns true. Otherwise returns false. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) { + resp, err := b.deleteBlob(container, name) + if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusAccepted, nil + } + defer resp.body.Close() + return false, err +} + +func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + + return b.client.exec(verb, uri, headers, nil) +} + +// helper method to construct the path to a container given its name +func pathForContainer(name string) string { + return fmt.Sprintf("/%s", name) +} + +// helper method to construct the path to a blob given its container and blob +// name +func pathForBlob(container, name string) string { + return fmt.Sprintf("/%s/%s", container, name) +} + +// GetBlobSASURI creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { + var ( + signedPermissions = permissions + blobURL = b.GetBlobURL(container, name) + ) + canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + if err != nil { + return "", err + } + signedExpiry := expiry.Format(time.RFC3339) + signedResource := "b" + + stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) + if err != nil { + return "", err + } + + sig := b.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {b.client.apiVersion}, + "se": {signedExpiry}, + "sr": {signedResource}, + "sp": {signedPermissions}, + "sig": {sig}, + } + + sasURL, err := url.Parse(blobURL) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go new file mode 100644 index 000000000..74f533646 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go @@ -0,0 +1,768 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "sync" + "testing" + "time" + + chk "gopkg.in/check.v1" +) + +type StorageBlobSuite struct{} + +var _ = chk.Suite(&StorageBlobSuite{}) + +const testContainerPrefix = "zzzztest-" + +func getBlobClient(c *chk.C) BlobStorageClient { + return getBasicClient(c).GetBlobService() +} + +func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) { + c.Assert(pathForContainer("foo"), chk.Equals, "/foo") +} + +func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) { + c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob") +} + +func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) { + _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP") + c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 + + out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") +} + +func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) { + api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "container/name", + RawQuery: url.Values{ + "sv": {"2013-08-15"}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + 
+ u, err := cli.GetBlobSASURI("container", "name", expiry, "r") + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + blob := randString(20) + body := []byte(randString(100)) + expiry := time.Now().UTC().Add(time.Hour) + permissions := "r" + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil) + + sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions) + c.Assert(err, chk.IsNil) + + resp, err := http.Get(sasURI) + c.Assert(err, chk.IsNil) + + blobResp, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + c.Assert(err, chk.IsNil) + + c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) + c.Assert(len(blobResp), chk.Equals, len(body)) +} + +func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) { + cli := getBlobClient(c) + c.Assert(deleteTestContainers(cli), chk.IsNil) + + const n = 5 + const pageSize = 2 + + // Create test containers + created := []string{} + for i := 0; i < n; i++ { + name := randContainer() + c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil) + created = append(created, name) + } + sort.Strings(created) + + // Defer test container deletions + defer func() { + var wg sync.WaitGroup + for _, cnt := range created { + wg.Add(1) + go func(name string) { + c.Assert(cli.DeleteContainer(name), chk.IsNil) + wg.Done() + }(cnt) + } + wg.Wait() + }() + + // Paginate results + seen := []string{} + marker := "" + for { + resp, err := cli.ListContainers(ListContainersParameters{ + Prefix: testContainerPrefix, + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + containers := resp.Containers + if len(containers) > pageSize { + c.Fatalf("Got a bigger page. 
Expected: %d, got: %d", pageSize, len(containers)) + } + + for _, c := range containers { + seen = append(seen, c.Name) + } + + marker = resp.NextMarker + if marker == "" || len(containers) == 0 { + break + } + } + + c.Assert(seen, chk.DeepEquals, created) +} + +func (s *StorageBlobSuite) TestContainerExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + ok, err := cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + ok, err = cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestCreateContainerDeleteContainer(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + c.Assert(cli.DeleteContainer(cnt), chk.IsNil) +} + +func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + defer cli.DeleteContainer(cnt) + + // First create + ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + + // Nonexisting container + c.Assert(cli.DeleteContainer(cnt), chk.NotNil) + + ok, err := cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // Existing container + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + ok, err = cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestBlobExists(c *chk.C) { + cnt := randContainer() + blob := randString(20) + cli := getBlobClient(c) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) + defer cli.DeleteBlob(cnt, blob) + + ok, err := cli.BlobExists(cnt, blob+".foo") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + ok, err = cli.BlobExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) { + api, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + + c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob") + c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob") + c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob") +} + +func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + cnt := randContainer() + src := randString(20) + dst := randString(20) + body := []byte(randString(1024)) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) + defer cli.DeleteBlob(cnt, src) + + c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, 
src)), chk.IsNil) + defer cli.DeleteBlob(cnt, dst) + + blobBody, err := cli.GetBlob(cnt, dst) + c.Assert(err, chk.IsNil) + + b, err := ioutil.ReadAll(blobBody) + defer blobBody.Close() + c.Assert(err, chk.IsNil) + c.Assert(b, chk.DeepEquals, body) +} + +func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) { + cnt := randContainer() + blob := randString(20) + + cli := getBlobClient(c) + c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil) + + ok, err := cli.DeleteBlobIfExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { + cnt := randContainer() + blob := randString(20) + contents := randString(64) + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + // Nonexisting blob + _, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.NotNil) + + // Put the blob + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil) + + // Get blob properties + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + c.Assert(props.ContentLength, chk.Equals, int64(len(contents))) + c.Assert(props.BlobType, chk.Equals, BlobTypeBlock) +} + +func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + blobs := []string{} + const n = 5 + const pageSize = 2 + for i := 0; i < n; i++ { + name := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) + blobs = append(blobs, name) + } + sort.Strings(blobs) + + // Paginate + seen := []string{} + marker := "" + for { + resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + for _, v := range resp.Blobs { + seen = append(seen, v.Name) + } + + marker = resp.NextMarker + if marker == "" || len(resp.Blobs) == 0 { + break + } + } + + // Compare + c.Assert(seen, chk.DeepEquals, blobs) +} + +func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + m, err := cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(m, chk.Not(chk.Equals), nil) + c.Assert(len(m), chk.Equals, 0) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPut) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mPut) + + // Case munging + + mPutUpper := map[string]string{ + "Foo": "different bar", + "bar_BAZ": "different waz qux", + } + mExpectLower := map[string]string{ + "foo": "different bar", + "bar_baz": "different waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPutUpper) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mExpectLower) +} + +func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + 
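+ // Upload a zero-length block blob via the single-block helper, then read its properties back.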
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Not(chk.Equals), 0) +} + +func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) { + cnt := randContainer() + blob := randString(20) + body := "0123456789" + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil) + defer cli.DeleteBlob(cnt, blob) + + // Read 1-3 + for _, r := range []struct { + rangeStr string + expected string + }{ + {"0-", body}, + {"1-3", body[1 : 3+1]}, + {"3-", body[3:]}, + } { + resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr) + c.Assert(err, chk.IsNil) + blobBody, err := ioutil.ReadAll(resp) + c.Assert(err, chk.IsNil) + + str := string(blobBody) + c.Assert(str, chk.Equals, r.expected) + } +} + +func (s *StorageBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + name := randString(20) + data := randBytes(8888) + c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data), nil), chk.IsNil) + + body, err := cli.GetBlob(cnt, name) + c.Assert(err, chk.IsNil) + gotData, err := ioutil.ReadAll(body) + body.Close() + + c.Assert(err, chk.IsNil) + c.Assert(gotData, chk.DeepEquals, data) +} + +func (s *StorageBlobSuite) TestCreateBlockBlobFromReaderWithShortData(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + name := randString(20) + data := randBytes(8888) + err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data), nil) + c.Assert(err, chk.Not(chk.IsNil)) + + _, err = cli.GetBlob(cnt, name) + // Upload was incomplete: blob should not have been created. 
+ c.Assert(err, chk.Not(chk.IsNil)) +} + +func (s *StorageBlobSuite) TestPutBlock(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) +} + +func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + + // Put one block + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) + defer cli.deleteBlob(cnt, blob) + + // Get committed blocks + committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) + c.Assert(err, chk.IsNil) + + if len(committed.CommittedBlocks) > 0 { + c.Fatal("There are committed blocks") + } + + // Get uncommitted blocks + uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted) + c.Assert(err, chk.IsNil) + + c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) + // Commit block list + c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil) + + // Get all blocks + all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(all.CommittedBlocks), chk.Equals, 1) + c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) + + // Verify the block + thatBlock := all.CommittedBlocks[0] + c.Assert(thatBlock.Name, chk.Equals, blockID) + c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) +} + +func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) + + // Verify + blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) + c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) +} + +func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, size) + c.Assert(props.BlobType, chk.Equals, BlobTypePage) +} + +func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append chunks + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), 
PageWriteTypeUpdate, chunk2), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() + + // Overwrite first half of chunk1 + chunk0 := []byte(randString(512)) + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) +} + +func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Put 0-2047 + chunk := []byte(randString(2048)) + c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil) + + // Clear 512-1023 + c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, "0-2047") + c.Assert(err, chk.IsNil) + contents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + defer out.Close() + c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) +} + +func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Get page ranges on empty blob + out, err := cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 0) + + // Add 0-512 page + c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 1) + + // Add 1024-2048 + c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 2) +} + +func (s *StorageBlobSuite) TestPutAppendBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, int64(0)) + c.Assert(props.BlobType, chk.Equals, BlobTypeAppend) +} + +func (s *StorageBlobSuite) TestPutAppendBlobAppendBlocks(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + 
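+ // An append blob must be created empty first; the AppendBlock calls below extend it.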
c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append first block + c.Assert(cli.AppendBlock(cnt, blob, chunk1), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, chunk1) + out.Close() + + // Append second block + c.Assert(cli.AppendBlock(cnt, blob, chunk2), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() +} + +func deleteTestContainers(cli BlobStorageClient) error { + for { + resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) + if err != nil { + return err + } + if len(resp.Containers) == 0 { + break + } + for _, c := range resp.Containers { + err = cli.DeleteContainer(c.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error { + if len(chunk) > MaxBlobBlockSize { + return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize) + } + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +func randContainer() string { + return testContainerPrefix + randString(32-len(testContainerPrefix)) +} + +func randString(n int) string { + if n <= 0 { + panic("negative number") + } + const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return string(bytes) +} + +func randBytes(n int) []byte { + data := make([]byte, n) + if _, err := io.ReadFull(rand.Reader, data); err != nil { + panic(err) + } + return data +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go new file mode 100644 index 000000000..df6c1a7b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -0,0 +1,392 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + // DefaultBaseURL is the domain name used for storage requests when a + // default client is created. + DefaultBaseURL = "core.windows.net" + + // DefaultAPIVersion is the Azure Storage API version string used when a + // basic client is created. 
+ DefaultAPIVersion = "2015-02-21" + + defaultUseHTTPS = true + + blobServiceName = "blob" + tableServiceName = "table" + queueServiceName = "queue" + fileServiceName = "file" +) + +// Client is the object that needs to be constructed to perform +// operations on the storage account. +type Client struct { + accountName string + accountKey []byte + useHTTPS bool + baseURL string + apiVersion string +} + +type storageResponse struct { + statusCode int + headers http.Header + body io.ReadCloser +} + +// AzureStorageServiceError contains fields of the error response from +// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx +// Some fields might be specific to certain calls. +type AzureStorageServiceError struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` + QueryParameterName string `xml:"QueryParameterName"` + QueryParameterValue string `xml:"QueryParameterValue"` + Reason string `xml:"Reason"` + StatusCode int + RequestID string +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int + got int +} + +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) +} + +// Got is the actual status code returned by Azure. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// NewBasicClient constructs a Client with the given storage account name and +// key. +func NewBasicClient(accountName, accountKey string) (Client, error) { + return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) +} + +// NewClient constructs a Client. This should be used if the caller wants +// to specify whether to use HTTPS, a specific REST API version or a custom +// storage endpoint other than Azure Public Cloud.
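+//
+// A minimal usage sketch; the account name, key, and Azure China endpoint below are illustrative values:
+//
+//	cli, err := NewClient("myaccount", "bXlrZXk=", "core.chinacloudapi.cn", DefaultAPIVersion, true)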
+func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { + var c Client + if accountName == "" { + return c, fmt.Errorf("azure: account name required") + } else if accountKey == "" { + return c, fmt.Errorf("azure: account key required") + } else if blobServiceBaseURL == "" { + return c, fmt.Errorf("azure: base storage service url required") + } + + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return c, err + } + + return Client{ + accountName: accountName, + accountKey: key, + useHTTPS: useHTTPS, + baseURL: blobServiceBaseURL, + apiVersion: apiVersion, + }, nil +} + +func (c Client) getBaseURL(service string) string { + scheme := "http" + if c.useHTTPS { + scheme = "https" + } + + host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) + + u := &url.URL{ + Scheme: scheme, + Host: host} + return u.String() +} + +func (c Client) getEndpoint(service, path string, params url.Values) string { + u, err := url.Parse(c.getBaseURL(service)) + if err != nil { + // really should not be happening + panic(err) + } + + if path == "" { + path = "/" // API doesn't accept path segments not starting with '/' + } + + u.Path = path + u.RawQuery = params.Encode() + return u.String() +} + +// GetBlobService returns a BlobStorageClient which can operate on the blob +// service of the storage account. +func (c Client) GetBlobService() BlobStorageClient { + return BlobStorageClient{c} +} + +// GetQueueService returns a QueueServiceClient which can operate on the queue +// service of the storage account. +func (c Client) GetQueueService() QueueServiceClient { + return QueueServiceClient{c} +} + +// GetFileService returns a FileServiceClient which can operate on the file +// service of the storage account. 
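+//
+// A minimal sketch, assuming cli is a Client from NewBasicClient and "myshare" is an example share name:
+//
+//	files := cli.GetFileService()
+//	err := files.CreateShare("myshare")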
+func (c Client) GetFileService() FileServiceClient { + return FileServiceClient{c} +} + +func (c Client) createAuthorizationHeader(canonicalizedString string) string { + signature := c.computeHmac256(canonicalizedString) + return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature) +} + +func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { + canonicalizedResource, err := c.buildCanonicalizedResource(url) + if err != nil { + return "", err + } + + canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) + return c.createAuthorizationHeader(canonicalizedString), nil +} + +func (c Client) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), + } +} + +func (c Client) buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + match, _ := regexp.MatchString("x-ms-", headerName) + if match { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := "" + + for i, key := range keys { + if i == len(keys)-1 { + ch += fmt.Sprintf("%s:%s", key, cm[key]) + } else { + ch += fmt.Sprintf("%s:%s\n", key, cm[key]) + } + } + return ch +} + +func (c Client) buildCanonicalizedResource(uri string) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + c.accountName + if len(u.Path) > 0 { + cr += u.Path + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + if len(params) > 0 { + cr += "\n" + keys := make([]string, 0, len(params)) + for key := range params { + keys = append(keys, key) + } + + sort.Strings(keys) + + for i, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + if i == len(keys)-1 { + cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) + } else { + cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) + } + } + } + return cr, nil +} + +func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { + contentLength := headers["Content-Length"] + if contentLength == "0" { + contentLength = "" + } + canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + verb, + headers["Content-Encoding"], + headers["Content-Language"], + contentLength, + headers["Content-MD5"], + headers["Content-Type"], + headers["Date"], + headers["If-Modified-Since"], + headers["If-Match"], + headers["If-None-Match"], + headers["If-Unmodified-Since"], + headers["Range"], + c.buildCanonicalizedHeader(headers), + canonicalizedResource) + + return canonicalizedString +} + +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { + authHeader, err := c.getAuthorizationHeader(verb, url, headers) + if err != nil { + return nil, err + } + headers["Authorization"] = authHeader + + if err != nil { + return nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + if clstr, ok := headers["Content-Length"]; ok { 
+ // content length header is being signed, but completely ignored by golang. + // instead we have to use the ContentLength property on the request struct + // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and + // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) + req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) + if err != nil { + return nil, err + } + } + for k, v := range headers { + req.Header.Add(k, v) + } + httpClient := http.Client{} + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) + } else { + // response contains storage service error object, unmarshal + storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) + if errIn != nil { // error unmarshaling the error response + err = errIn + } else { + err = storageErr + } + } + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ + }, err + } + + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: resp.Body}, nil +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { + var storageErr AzureStorageServiceError + if err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + return storageErr, nil +} + +func (e AzureStorageServiceError) Error() string { + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) +} + +// checkRespCode returns UnexpectedStatusCodeError if the given response code is not +// one of the allowed status codes; otherwise nil.
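+//
+// A typical call site, as used throughout this package:
+//
+//	return checkRespCode(resp.statusCode, []int{http.StatusCreated})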
+func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go new file mode 100644 index 000000000..5bc521100 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go @@ -0,0 +1,156 @@ +package storage + +import ( + "encoding/base64" + "net/url" + "os" + "testing" + + chk "gopkg.in/check.v1" +) + +// Hook up gocheck to testing +func Test(t *testing.T) { chk.TestingT(t) } + +type StorageClientSuite struct{} + +var _ = chk.Suite(&StorageClientSuite{}) + +// getBasicClient returns a test client from storage credentials in the env +func getBasicClient(c *chk.C) Client { + name := os.Getenv("ACCOUNT_NAME") + if name == "" { + c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test") + } + key := os.Getenv("ACCOUNT_KEY") + if key == "" { + c.Fatal("ACCOUNT_KEY not set") + } + cli, err := NewBasicClient(name, key) + c.Assert(err, chk.IsNil) + return cli +} + +func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion) + c.Assert(err, chk.IsNil) + c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net") +} + +func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) { + apiVersion := "2015-01-01" // a non existing one + cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false) + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, apiVersion) + c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn") +} + +func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/") +} + +func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "path", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path") +} + +func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d") +} + +func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "path", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d") +} + +func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + headers := cli.getStandardHeaders() + c.Assert(len(headers), chk.Equals, 2) + c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion) + if _, ok := headers["x-ms-date"]; !ok { + c.Fatal("Missing date header") + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedResource(c 
*chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct{ url, expected string } + tests := []test{ + {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"}, + {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"}, + {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"}, + } + + for _, i := range tests { + out, err := cli.buildCanonicalizedResource(i.url) + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct { + headers map[string]string + expected string + } + tests := []test{ + {map[string]string{}, ""}, + {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{"foo:": "bar"}, ""}, + {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{ + "x-ms-version": "9999-99-99", + "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} + + for _, i := range tests { + c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { + // attempt to delete a nonexisting container + _, err := getBlobClient(c).deleteContainer(randContainer()) + c.Assert(err, chk.NotNil) + + v, ok := err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, 404) + c.Assert(v.Code, chk.Equals, "ContainerNotFound") + c.Assert(v.Code, chk.Not(chk.Equals), "") +} + +func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) { + key := base64.StdEncoding.EncodeToString([]byte("bar")) + cli, err := NewBasicClient("foo", key) + c.Assert(err, chk.IsNil) + + canonicalizedString := `foobarzoo` + expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=` + c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 000000000..ede4e21be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,91 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client +} + +// pathForFileShare returns the URL path segment for a File Share resource +func pathForFileShare(name string) string { + return fmt.Sprintf("/%s", name) +} + +// CreateShare operation creates a new share under the specified account. If the +// share with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShare(name string) error { + resp, err := f.createShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateShareIfNotExists creates a new share under the specified account if +// it does not exist. Returns true if the share is newly created or false if +// the share already exists.
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { + resp, err := f.createShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// createShare creates an Azure File Share and returns its response +func (f FileServiceClient) createShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + headers := f.client.getStandardHeaders() + return f.client.exec("PUT", uri, headers, nil) +} + +// DeleteShare operation marks the specified share for deletion. The share +// and any files contained within it are later deleted during garbage +// collection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShare(name string) error { + resp, err := f.deleteShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteShareIfExists operation marks the specified share for deletion if it +// exists. The share and any files contained within it are later deleted during +// garbage collection. Returns true if the share existed and was deleted with +// this call, false otherwise. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { + resp, err := f.deleteShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// deleteShare makes the call to the Delete Share operation endpoint and returns +// the response +func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go new file mode 100644 index 000000000..29a6500a6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go @@ -0,0 +1,63 @@ +package storage + +import ( + chk "gopkg.in/check.v1" +) + +type StorageFileSuite struct{} + +var _ = chk.Suite(&StorageFileSuite{}) + +func getFileClient(c *chk.C) FileServiceClient { + return getBasicClient(c).GetFileService() +} + +func (s *StorageFileSuite) Test_pathForFileShare(c *chk.C) { + c.Assert(pathForFileShare("foo"), chk.Equals, "/foo") +} + +func (s *StorageFileSuite) TestCreateShareDeleteShare(c *chk.C) { + cli := getFileClient(c) + name := randShare() + c.Assert(cli.CreateShare(name), chk.IsNil) + c.Assert(cli.DeleteShare(name), chk.IsNil) +} + +func (s *StorageFileSuite) TestCreateShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + defer cli.DeleteShare(name) + + // First create + ok, err := cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s 
*StorageFileSuite) TestDeleteShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + + // delete non-existing share + ok, err := cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateShare(name), chk.IsNil) + + // delete existing share + ok, err = cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +const testSharePrefix = "zzzzztest" + +func randShare() string { + return testSharePrefix + randString(32-len(testSharePrefix)) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go new file mode 100644 index 000000000..3ecf4aca0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -0,0 +1,306 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + // Casing is per Go's http.Header canonicalization of the header names. + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" + userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" +) + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client +} + +func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } +func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } +func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} + +// PutMessageParameters is the set of options that can be specified for the Put +// Message operation. A zero struct does not use any preferences for the request. +type PutMessageParameters struct { + VisibilityTimeout int + MessageTTL int +} + +func (p PutMessageParameters) getParameters() url.Values { + out := url.Values{} + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + if p.MessageTTL != 0 { + out.Set("messagettl", strconv.Itoa(p.MessageTTL)) + } + return out +} + +// GetMessagesParameters is the set of options that can be specified for the Get +// Messages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesParameters struct { + NumOfMessages int + VisibilityTimeout int +} + +func (p GetMessagesParameters) getParameters() url.Values { + out := url.Values{} + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + return out +} + +// PeekMessagesParameters is the set of options that can be specified for the Peek +// Messages operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesParameters struct { + NumOfMessages int +} + +func (p PeekMessagesParameters) getParameters() url.Values { + out := url.Values{"peekonly": {"true"}} // Required for peek operation + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + return out +} + +// GetMessagesResponse represents a response returned from the Get Messages +// operation.
+type GetMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` +} + +// GetMessageResponse represents a QueueMessage object returned from the Get +// Messages operation response. +type GetMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + TimeNextVisible string `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// PeekMessagesResponse represents a response returned from the Peek Messages +// operation. +type PeekMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` +} + +// PeekMessageResponse represents a QueueMessage object returned from the Peek +// Messages operation response. +type PeekMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// QueueMetadataResponse represents user-defined metadata and queue +// properties on a specific queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +type QueueMetadataResponse struct { + ApproximateMessageCount int + UserDefinedMetadata map[string]string +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx +func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// +// Because of the way Go's http client (and http.Header in particular) +// canonicalizes header names, the returned metadata names are always +// all lower case.
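+//
+// Illustrative usage, assuming queueClient is a QueueServiceClient and "myqueue" is an example queue name:
+//
+//	qm, err := queueClient.GetMetadata("myqueue")
+//	// On success, qm.ApproximateMessageCount and qm.UserDefinedMetadata are populated.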
+func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { + qm := QueueMetadataResponse{} + qm.UserDefinedMetadata = make(map[string]string) + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("GET", uri, headers, nil) + if err != nil { + return qm, err + } + defer resp.body.Close() + + for k, v := range resp.headers { + if len(v) != 1 { + return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) + } + + value := v[0] + + if k == approximateMessagesCountHeader { + qm.ApproximateMessageCount, err = strconv.Atoi(value) + if err != nil { + return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) + } + } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { + name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) + qm.UserDefinedMetadata[strings.ToLower(name)] = value + } + } + + return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// CreateQueue operation creates a queue under the given account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx +func (c QueueServiceClient) CreateQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// DeleteQueue operation permanently deletes the specified queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx +func (c QueueServiceClient) DeleteQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// QueueExists returns true if a queue with given name exists. +func (c QueueServiceClient) QueueExists(name string) (bool, error) { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusOK, nil + } + + return false, err +} + +// PutMessage operation adds a new message to the back of the message queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx +func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + req := putMessageRequest{MessageText: message} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers := c.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(nn) + resp, err := c.client.exec("POST", uri, headers, body) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// ClearMessages operation deletes all messages from the specified queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx +func (c QueueServiceClient) ClearMessages(queue string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx +func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { + var r GetMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx +func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { + var r PeekMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// DeleteMessage operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { + uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ + "popreceipt": {popReceipt}}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go new file mode 100644 index 000000000..d16d152ac --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go @@ -0,0 +1,132 @@ +package storage + +import ( + "time" + + chk "gopkg.in/check.v1" +) + +type StorageQueueSuite struct{} + +var _ = chk.Suite(&StorageQueueSuite{}) + +func getQueueClient(c *chk.C) QueueServiceClient { + return getBasicClient(c).GetQueueService() +} + +func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) { + c.Assert(pathForQueue("q"), chk.Equals, "/q") +} + +func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) { + c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages") +} + +func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) { + c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m") +} + +func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + c.Assert(cli.DeleteQueue(name), chk.IsNil) +} + +func (s *StorageQueueSuite) Test_GetMetadata_GetApproximateCount(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + 
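+ // The approximate-count header can lag behind writes, hence the short sleep later in this test.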
c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 0) + + for ix := 0; ix < 3; ix++ { + err = cli.PutMessage(name, "foobar", PutMessageParameters{}) + c.Assert(err, chk.IsNil) + } + time.Sleep(1 * time.Second) + + qm, err = cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 3) +} + +func (s *StorageQueueSuite) Test_SetMetadataGetMetadata_Roundtrips(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + metadata := make(map[string]string) + metadata["Foo1"] = "bar1" + metadata["fooBaz"] = "bar" + err := cli.SetMetadata(name, metadata) + c.Assert(err, chk.IsNil) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.UserDefinedMetadata["foo1"], chk.Equals, "bar1") + c.Assert(qm.UserDefinedMetadata["foobaz"], chk.Equals, "bar") +} + +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { + cli := getQueueClient(c) + ok, err := cli.QueueExists("nonexistent-queue") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + ok, err = cli.QueueExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + msg := randString(64 * 1024) // exercise max length + c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil) + r, err := cli.PeekMessages(q, PeekMessagesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg) +} + +func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + n := 4 + for i := 0; i < n; i++ { + c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil) + } + + r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, n) +} + +func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil) + r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + m := r.QueueMessagesList[0] + c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go new file mode 100644 index 000000000..33155af7f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -0,0 +1,71 @@ +package storage + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" +) + +func (c Client) computeHmac256(message string) string { + h := hmac.New(sha256.New, c.accountKey) + h.Write([]byte(message)) + return 
base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func currentTimeRfc1123Formatted() string { + return timeRfc1123Formatted(time.Now().UTC()) +} + +func timeRfc1123Formatted(t time.Time) string { + return t.Format(http.TimeFormat) +} + +func mergeParams(v1, v2 url.Values) url.Values { + out := url.Values{} + for k, v := range v1 { + out[k] = v + } + for k, v := range v2 { + vals, ok := out[k] + if ok { + vals = append(vals, v...) + out[k] = vals + } else { + out[k] = v + } + } + return out +} + +func prepareBlockListRequest(blocks []Block) string { + s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` + for _, v := range blocks { + s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status) + } + s += `</BlockList>` + return s +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func xmlMarshal(v interface{}) (io.Reader, int, error) { + b, err := xml.Marshal(v) + if err != nil { + return nil, 0, err + } + return bytes.NewReader(b), len(b), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go new file mode 100644 index 000000000..9bf82dcc3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go @@ -0,0 +1,69 @@ +package storage + +import ( + "encoding/xml" + "io/ioutil" + "net/url" + "strings" + "time" + + chk "gopkg.in/check.v1" +) + +func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) { + now := time.Now().UTC() + expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT" + c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout)) +} + +func (s *StorageClientSuite) Test_mergeParams(c *chk.C) { + v1 := url.Values{ + "k1": {"v1"}, + "k2": {"v2"}} + v2 := url.Values{ + "k1": {"v11"}, + "k3": {"v3"}} + out := mergeParams(v1, v2) + c.Assert(out.Get("k1"), chk.Equals, "v1") + c.Assert(out.Get("k2"), chk.Equals, "v2") + c.Assert(out.Get("k3"), chk.Equals, "v3") + c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"}) +} + +func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) { + empty := []Block{} + expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>` + c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected) + + blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}} + expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>` + c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected) +} + +func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) { + xml := `<?xml version="1.0" encoding="utf-8"?> + <Blob> + <Name>myblob</Name> + </Blob>` + var blob Blob + body := ioutil.NopCloser(strings.NewReader(xml)) + c.Assert(xmlUnmarshal(body, &blob), chk.IsNil) + c.Assert(blob.Name, chk.Equals, "myblob") +} + +func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) { + type t struct { + XMLName xml.Name `xml:"S"` + Name string `xml:"Name"` + } + + b := t{Name: "myblob"} + expected := `<S><Name>myblob</Name></S>` + r, i, err := xmlMarshal(b) + c.Assert(err, chk.IsNil) + o, err := ioutil.ReadAll(r) + c.Assert(err, chk.IsNil) + out := string(o) + c.Assert(out, chk.Equals, expected) + c.Assert(i, chk.Equals, len(expected)) +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 000000000..b523ece77 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,13 @@ +{ + "comment": "", + "ignore": "", + "package": [ + { + "checksumSHA1": "YwSdnhzU/foUxdSd9rM0UDcXbms=", + "path": "github.com/Azure/azure-sdk-for-go/storage", + "revision": "95361a2573b1fa92a00c5fc2707a80308483c6f9", + "revisionTime": "2016-02-10T10:36:55-08:00" + } + ], + "rootPath": "github.com/hashicorp/vault" +} diff --git 
a/website/source/docs/config/index.html.md b/website/source/docs/config/index.html.md index 979efc4b7..7c0d96717 100644 --- a/website/source/docs/config/index.html.md +++ b/website/source/docs/config/index.html.md @@ -151,6 +151,9 @@ to help you, but may refer you to the backend author. * `s3` - Store data within an S3 bucket [S3](https://aws.amazon.com/s3/). This backend does not support HA. This is a community-supported backend. + * `azure` - Store data within an Azure Storage container [Azure](https://azure.microsoft.com/en-us/services/storage/). + This backend does not support HA. This is a community-supported backend. + * `mysql` - Store data within MySQL. This backend does not support HA. This is a community-supported backend. @@ -281,7 +284,7 @@ The following optional settings can be used to configure zNode ACLs: If neither of these is set, the backend will not authenticate with Zookeeper and will set the OPEN_ACL_UNSAFE ACL on all nodes. In this scenario, anyone connected to Zookeeper could change Vault’s znodes and, potentially, take Vault -out of service. +out of service. Some sample configurations: @@ -359,6 +362,14 @@ will cause Vault to attempt to retrieve credentials from the metadata service. You are responsible for ensuring your instance is launched with the appropriate profile enabled. Vault will handle renewing profile credentials as they rotate. +#### Backend Reference: Azure (Community-Supported) + + * `accountName` (required) - The Azure Storage account name + * `accountKey` (required) - The Azure Storage account key + * `container` (required) - The Azure Storage Blob container name + +The current implementation is limited to a maximum of 4 MB per blob. + #### Backend Reference: MySQL (Community-Supported) The MySQL backend has the following options: