Update Azure dep (#2881)
This commit is contained in:
parent
b946eefcda
commit
f8f95524d0
|
@ -10,11 +10,12 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
storage "github.com/Azure/azure-sdk-for-go/storage"
|
||||||
log "github.com/mgutz/logxi/v1"
|
log "github.com/mgutz/logxi/v1"
|
||||||
|
|
||||||
"github.com/Azure/azure-storage-go"
|
|
||||||
"github.com/armon/go-metrics"
|
"github.com/armon/go-metrics"
|
||||||
"github.com/hashicorp/errwrap"
|
"github.com/hashicorp/errwrap"
|
||||||
|
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||||
"github.com/hashicorp/vault/helper/strutil"
|
"github.com/hashicorp/vault/helper/strutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -24,8 +25,7 @@ var MaxBlobSize = 1024 * 1024 * 4
|
||||||
// AzureBackend is a physical backend that stores data
|
// AzureBackend is a physical backend that stores data
|
||||||
// within an Azure blob container.
|
// within an Azure blob container.
|
||||||
type AzureBackend struct {
|
type AzureBackend struct {
|
||||||
container string
|
container *storage.Container
|
||||||
client storage.BlobStorageClient
|
|
||||||
logger log.Logger
|
logger log.Logger
|
||||||
permitPool *PermitPool
|
permitPool *PermitPool
|
||||||
}
|
}
|
||||||
|
@ -34,11 +34,10 @@ type AzureBackend struct {
|
||||||
// bucket. Credentials can be provided to the backend, sourced
|
// bucket. Credentials can be provided to the backend, sourced
|
||||||
// from the environment, AWS credential files or by IAM role.
|
// from the environment, AWS credential files or by IAM role.
|
||||||
func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||||
|
name := os.Getenv("AZURE_BLOB_CONTAINER")
|
||||||
container := os.Getenv("AZURE_BLOB_CONTAINER")
|
if name == "" {
|
||||||
if container == "" {
|
name = conf["container"]
|
||||||
container = conf["container"]
|
if name == "" {
|
||||||
if container == "" {
|
|
||||||
return nil, fmt.Errorf("'container' must be set")
|
return nil, fmt.Errorf("'container' must be set")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -63,19 +62,15 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create Azure client: %v", err)
|
return nil, fmt.Errorf("failed to create Azure client: %v", err)
|
||||||
}
|
}
|
||||||
|
client.HTTPClient = cleanhttp.DefaultPooledClient()
|
||||||
|
|
||||||
contObj := client.GetBlobService().GetContainerReference(container)
|
blobClient := client.GetBlobService()
|
||||||
created, err := contObj.CreateIfNotExists()
|
container := blobClient.GetContainerReference(name)
|
||||||
|
_, err = container.CreateIfNotExists(&storage.CreateContainerOptions{
|
||||||
|
Access: storage.ContainerAccessTypePrivate,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to upsert container: %v", err)
|
return nil, fmt.Errorf("failed to create %q container: %v", name, err)
|
||||||
}
|
|
||||||
if created {
|
|
||||||
err = contObj.SetPermissions(storage.ContainerPermissions{
|
|
||||||
AccessType: storage.ContainerAccessTypePrivate,
|
|
||||||
}, 0, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to set permissions on newly-created container: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
maxParStr, ok := conf["max_parallel"]
|
maxParStr, ok := conf["max_parallel"]
|
||||||
|
@ -92,7 +87,6 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error)
|
||||||
|
|
||||||
a := &AzureBackend{
|
a := &AzureBackend{
|
||||||
container: container,
|
container: container,
|
||||||
client: client.GetBlobService(),
|
|
||||||
logger: logger,
|
logger: logger,
|
||||||
permitPool: NewPermitPool(maxParInt),
|
permitPool: NewPermitPool(maxParInt),
|
||||||
}
|
}
|
||||||
|
@ -104,7 +98,7 @@ func (a *AzureBackend) Put(entry *Entry) error {
|
||||||
defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
|
defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
|
||||||
|
|
||||||
if len(entry.Value) >= MaxBlobSize {
|
if len(entry.Value) >= MaxBlobSize {
|
||||||
return fmt.Errorf("Value is bigger than the current supported limit of 4MBytes")
|
return fmt.Errorf("value is bigger than the current supported limit of 4MBytes")
|
||||||
}
|
}
|
||||||
|
|
||||||
blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
|
blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
|
||||||
|
@ -114,10 +108,15 @@ func (a *AzureBackend) Put(entry *Entry) error {
|
||||||
a.permitPool.Acquire()
|
a.permitPool.Acquire()
|
||||||
defer a.permitPool.Release()
|
defer a.permitPool.Release()
|
||||||
|
|
||||||
err := a.client.PutBlock(a.container, entry.Key, blockID, entry.Value)
|
blob := &storage.Blob{
|
||||||
|
Container: a.container,
|
||||||
|
Name: entry.Key,
|
||||||
|
}
|
||||||
|
if err := blob.PutBlock(blockID, entry.Value, nil); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
err = a.client.PutBlockList(a.container, entry.Key, blocks)
|
return blob.PutBlockList(blocks, nil)
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get is used to fetch an entry
|
// Get is used to fetch an entry
|
||||||
|
@ -127,18 +126,23 @@ func (a *AzureBackend) Get(key string) (*Entry, error) {
|
||||||
a.permitPool.Acquire()
|
a.permitPool.Acquire()
|
||||||
defer a.permitPool.Release()
|
defer a.permitPool.Release()
|
||||||
|
|
||||||
exists, _ := a.client.BlobExists(a.container, key)
|
blob := &storage.Blob{
|
||||||
|
Container: a.container,
|
||||||
|
Name: key,
|
||||||
|
}
|
||||||
|
exists, err := blob.Exists()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if !exists {
|
if !exists {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
reader, err := a.client.GetBlob(a.container, key)
|
reader, err := blob.Get(nil)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
defer reader.Close()
|
||||||
data, err := ioutil.ReadAll(reader)
|
data, err := ioutil.ReadAll(reader)
|
||||||
|
|
||||||
ent := &Entry{
|
ent := &Entry{
|
||||||
|
@ -153,10 +157,15 @@ func (a *AzureBackend) Get(key string) (*Entry, error) {
|
||||||
func (a *AzureBackend) Delete(key string) error {
|
func (a *AzureBackend) Delete(key string) error {
|
||||||
defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
|
defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
|
||||||
|
|
||||||
|
blob := &storage.Blob{
|
||||||
|
Container: a.container,
|
||||||
|
Name: key,
|
||||||
|
}
|
||||||
|
|
||||||
a.permitPool.Acquire()
|
a.permitPool.Acquire()
|
||||||
defer a.permitPool.Release()
|
defer a.permitPool.Release()
|
||||||
|
|
||||||
_, err := a.client.DeleteBlobIfExists(a.container, key, nil)
|
_, err := blob.DeleteIfExists(nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -166,15 +175,13 @@ func (a *AzureBackend) List(prefix string) ([]string, error) {
|
||||||
defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
|
defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
|
||||||
|
|
||||||
a.permitPool.Acquire()
|
a.permitPool.Acquire()
|
||||||
defer a.permitPool.Release()
|
list, err := a.container.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
|
||||||
|
|
||||||
contObj := a.client.GetContainerReference(a.container)
|
|
||||||
list, err := contObj.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Break early.
|
// Break early.
|
||||||
|
a.permitPool.Release()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
a.permitPool.Release()
|
||||||
|
|
||||||
keys := []string{}
|
keys := []string{}
|
||||||
for _, blob := range list.Blobs {
|
for _, blob := range list.Blobs {
|
||||||
|
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright 2016 Microsoft Corporation
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,70 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PutAppendBlob initializes an empty append blob with specified name. An
|
||||||
|
// append blob must be created using this method before appending blocks.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
||||||
|
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendBlockOptions includes the options for an append block operation
|
||||||
|
type AppendBlockOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
MaxSize *uint `header:"x-ms-blob-condition-maxsize"`
|
||||||
|
AppendPosition *uint `header:"x-ms-blob-condition-appendpos"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendBlock appends a block to an append blob.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
|
||||||
|
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
|
||||||
|
params := url.Values{"comp": {"appendblock"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||||
|
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
|
}
|
|
@ -20,20 +20,24 @@ const (
|
||||||
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
||||||
|
|
||||||
// headers
|
// headers
|
||||||
headerAuthorization = "Authorization"
|
headerAcceptCharset = "Accept-Charset"
|
||||||
headerContentLength = "Content-Length"
|
headerAuthorization = "Authorization"
|
||||||
headerDate = "Date"
|
headerContentLength = "Content-Length"
|
||||||
headerXmsDate = "x-ms-date"
|
headerDate = "Date"
|
||||||
headerXmsVersion = "x-ms-version"
|
headerXmsDate = "x-ms-date"
|
||||||
headerContentEncoding = "Content-Encoding"
|
headerXmsVersion = "x-ms-version"
|
||||||
headerContentLanguage = "Content-Language"
|
headerContentEncoding = "Content-Encoding"
|
||||||
headerContentType = "Content-Type"
|
headerContentLanguage = "Content-Language"
|
||||||
headerContentMD5 = "Content-MD5"
|
headerContentType = "Content-Type"
|
||||||
headerIfModifiedSince = "If-Modified-Since"
|
headerContentMD5 = "Content-MD5"
|
||||||
headerIfMatch = "If-Match"
|
headerIfModifiedSince = "If-Modified-Since"
|
||||||
headerIfNoneMatch = "If-None-Match"
|
headerIfMatch = "If-Match"
|
||||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
headerIfNoneMatch = "If-None-Match"
|
||||||
headerRange = "Range"
|
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||||
|
headerRange = "Range"
|
||||||
|
headerDataServiceVersion = "DataServiceVersion"
|
||||||
|
headerMaxDataServiceVersion = "MaxDataServiceVersion"
|
||||||
|
headerContentTransferEncoding = "Content-Transfer-Encoding"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
|
@ -0,0 +1,617 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Blob is an entry in BlobListResponse.
|
||||||
|
type Blob struct {
|
||||||
|
Container *Container
|
||||||
|
Name string `xml:"Name"`
|
||||||
|
Snapshot time.Time `xml:"Snapshot"`
|
||||||
|
Properties BlobProperties `xml:"Properties"`
|
||||||
|
Metadata BlobMetadata `xml:"Metadata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlobOptions includes the options any put blob operation
|
||||||
|
// (page, block, append)
|
||||||
|
type PutBlobOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
Origin string `header:"Origin"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobMetadata is a set of custom name/value pairs.
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
|
||||||
|
type BlobMetadata map[string]string
|
||||||
|
|
||||||
|
type blobMetadataEntries struct {
|
||||||
|
Entries []blobMetadataEntry `xml:",any"`
|
||||||
|
}
|
||||||
|
type blobMetadataEntry struct {
|
||||||
|
XMLName xml.Name
|
||||||
|
Value string `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalXML converts the xml:Metadata into Metadata map
|
||||||
|
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||||
|
var entries blobMetadataEntries
|
||||||
|
if err := d.DecodeElement(&entries, &start); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, entry := range entries.Entries {
|
||||||
|
if *bm == nil {
|
||||||
|
*bm = make(BlobMetadata)
|
||||||
|
}
|
||||||
|
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalXML implements the xml.Marshaler interface. It encodes
|
||||||
|
// metadata name/value pairs as they would appear in an Azure
|
||||||
|
// ListBlobs response.
|
||||||
|
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
|
||||||
|
entries := make([]blobMetadataEntry, 0, len(bm))
|
||||||
|
for k, v := range bm {
|
||||||
|
entries = append(entries, blobMetadataEntry{
|
||||||
|
XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
|
||||||
|
Value: v,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return enc.EncodeElement(blobMetadataEntries{
|
||||||
|
Entries: entries,
|
||||||
|
}, start)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobProperties contains various properties of a blob
|
||||||
|
// returned in various endpoints like ListBlobs or GetBlobProperties.
|
||||||
|
type BlobProperties struct {
|
||||||
|
LastModified TimeRFC1123 `xml:"Last-Modified"`
|
||||||
|
Etag string `xml:"Etag"`
|
||||||
|
ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
|
||||||
|
ContentLength int64 `xml:"Content-Length"`
|
||||||
|
ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"`
|
||||||
|
ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
|
||||||
|
CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
|
||||||
|
ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"`
|
||||||
|
ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
|
||||||
|
BlobType BlobType `xml:"x-ms-blob-blob-type"`
|
||||||
|
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
|
||||||
|
CopyID string `xml:"CopyId"`
|
||||||
|
CopyStatus string `xml:"CopyStatus"`
|
||||||
|
CopySource string `xml:"CopySource"`
|
||||||
|
CopyProgress string `xml:"CopyProgress"`
|
||||||
|
CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"`
|
||||||
|
CopyStatusDescription string `xml:"CopyStatusDescription"`
|
||||||
|
LeaseStatus string `xml:"LeaseStatus"`
|
||||||
|
LeaseState string `xml:"LeaseState"`
|
||||||
|
LeaseDuration string `xml:"LeaseDuration"`
|
||||||
|
ServerEncrypted bool `xml:"ServerEncrypted"`
|
||||||
|
IncrementalCopy bool `xml:"IncrementalCopy"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobType defines the type of the Azure Blob.
|
||||||
|
type BlobType string
|
||||||
|
|
||||||
|
// Types of page blobs
|
||||||
|
const (
|
||||||
|
BlobTypeBlock BlobType = "BlockBlob"
|
||||||
|
BlobTypePage BlobType = "PageBlob"
|
||||||
|
BlobTypeAppend BlobType = "AppendBlob"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (b *Blob) buildPath() string {
|
||||||
|
return b.Container.buildPath() + "/" + b.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exists returns true if a blob with given name exists on the specified
|
||||||
|
// container of the storage account.
|
||||||
|
func (b *Blob) Exists() (bool, error) {
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if resp != nil {
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||||
|
return resp.statusCode == http.StatusOK, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetURL gets the canonical URL to the blob with the specified name in the
|
||||||
|
// specified container. If name is not specified, the canonical URL for the entire
|
||||||
|
// container is obtained.
|
||||||
|
// This method does not create a publicly accessible URL if the blob or container
|
||||||
|
// is private and this method does not check if the blob exists.
|
||||||
|
func (b *Blob) GetURL() string {
|
||||||
|
container := b.Container.Name
|
||||||
|
if container == "" {
|
||||||
|
container = "$root"
|
||||||
|
}
|
||||||
|
return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlobRangeOptions includes the options for a get blob range operation.
type GetBlobRangeOptions struct {
	Range              *BlobRange // byte range to fetch; sent as the Range header when set
	GetRangeContentMD5 bool       // request an MD5 of the returned range (x-ms-range-get-content-md5)
	*GetBlobOptions               // embedded general get-blob options; may be nil
}
|
||||||
|
|
||||||
|
// GetBlobOptions includes the options for a get blob operation.
// Fields carrying a `header` tag are sent verbatim as request headers via
// headersFromStruct; Timeout and Snapshot become query parameters.
type GetBlobOptions struct {
	Timeout           uint       // propagated to the request via addTimeout
	Snapshot          *time.Time // read a specific snapshot instead of the base blob
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// BlobRange represents the inclusive byte range to be fetched.
type BlobRange struct {
	Start uint64 // first byte offset, inclusive
	End   uint64 // last byte offset, inclusive
}
|
||||||
|
|
||||||
|
func (br BlobRange) String() string {
|
||||||
|
return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a stream to read the blob. Caller must call both Read and Close()
|
||||||
|
// to correctly close the underlying connection.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
|
||||||
|
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
|
||||||
|
rangeOptions := GetBlobRangeOptions{
|
||||||
|
GetBlobOptions: options,
|
||||||
|
}
|
||||||
|
resp, err := b.getRange(&rangeOptions)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := b.writePropoerties(resp.headers); err != nil {
|
||||||
|
return resp.body, err
|
||||||
|
}
|
||||||
|
return resp.body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRange reads the specified range of a blob to a stream. The bytesRange
|
||||||
|
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
|
||||||
|
// Caller must call both Read and Close()// to correctly close the underlying
|
||||||
|
// connection.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
|
||||||
|
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
|
||||||
|
resp, err := b.getRange(options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := b.writePropoerties(resp.headers); err != nil {
|
||||||
|
return resp.body, err
|
||||||
|
}
|
||||||
|
return resp.body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
if options.Range != nil {
|
||||||
|
headers["Range"] = options.Range.String()
|
||||||
|
headers["x-ms-range-get-content-md5"] = fmt.Sprintf("%v", options.GetRangeContentMD5)
|
||||||
|
}
|
||||||
|
if options.GetBlobOptions != nil {
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
params = addSnapshot(params, options.Snapshot)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SnapshotOptions includes the options for a snapshot blob operation.
// Fields carrying a `header` tag are sent verbatim as request headers.
type SnapshotOptions struct {
	Timeout           uint // propagated to the request via addTimeout
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// CreateSnapshot creates a snapshot for a blob
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
|
||||||
|
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
|
||||||
|
params := url.Values{"comp": {"snapshot"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil || resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
|
||||||
|
if snapshotResponse != "" {
|
||||||
|
snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &snapshotTimestamp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("Snapshot not created")
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlobPropertiesOptions includes the options for a get blob properties
// operation. Fields carrying a `header` tag are sent verbatim as request
// headers; Timeout and Snapshot become query parameters.
type GetBlobPropertiesOptions struct {
	Timeout           uint       // propagated to the request via addTimeout
	Snapshot          *time.Time // inspect a specific snapshot instead of the base blob
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// GetProperties provides various information about the specified blob.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
|
||||||
|
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
params = addSnapshot(params, options.Snapshot)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return b.writePropoerties(resp.headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writePropoerties (sic — the misspelled name is referenced by Get, GetRange
// and GetProperties in this file, so it is kept) parses blob property
// headers from h into b.Properties and refreshes b.Metadata.
func (b *Blob) writePropoerties(h http.Header) error {
	var err error

	// Content-Length and x-ms-blob-sequence-number are only parsed when
	// present; base 0 lets strconv infer the base from the string.
	var contentLength int64
	contentLengthStr := h.Get("Content-Length")
	if contentLengthStr != "" {
		contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
		if err != nil {
			return err
		}
	}

	var sequenceNum int64
	sequenceNumStr := h.Get("x-ms-blob-sequence-number")
	if sequenceNumStr != "" {
		sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
		if err != nil {
			return err
		}
	}

	lastModified, err := getTimeFromHeaders(h, "Last-Modified")
	if err != nil {
		return err
	}

	copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
	if err != nil {
		return err
	}

	// NOTE(review): if getTimeFromHeaders can return (nil, nil) for an
	// absent header, the dereferences below panic — confirm its contract.
	b.Properties = BlobProperties{
		LastModified:          TimeRFC1123(*lastModified),
		Etag:                  h.Get("Etag"),
		ContentMD5:            h.Get("Content-MD5"),
		ContentLength:         contentLength,
		ContentEncoding:       h.Get("Content-Encoding"),
		ContentType:           h.Get("Content-Type"),
		ContentDisposition:    h.Get("Content-Disposition"),
		CacheControl:          h.Get("Cache-Control"),
		ContentLanguage:       h.Get("Content-Language"),
		SequenceNumber:        sequenceNum,
		CopyCompletionTime:    TimeRFC1123(*copyCompletionTime),
		CopyStatusDescription: h.Get("x-ms-copy-status-description"),
		CopyID:                h.Get("x-ms-copy-id"),
		CopyProgress:          h.Get("x-ms-copy-progress"),
		CopySource:            h.Get("x-ms-copy-source"),
		CopyStatus:            h.Get("x-ms-copy-status"),
		BlobType:              BlobType(h.Get("x-ms-blob-type")),
		LeaseStatus:           h.Get("x-ms-lease-status"),
		LeaseState:            h.Get("x-ms-lease-state"),
	}
	b.writeMetadata(h)
	return nil
}
|
||||||
|
|
||||||
|
// SetBlobPropertiesOptions contains various properties of a blob and is an
// entry in SetProperties. Fields carrying a `header` tag are sent verbatim
// as request headers.
type SetBlobPropertiesOptions struct {
	Timeout              uint // propagated to the request via addTimeout
	LeaseID              string     `header:"x-ms-lease-id"`
	Origin               string     `header:"Origin"`
	IfModifiedSince      *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince    *time.Time `header:"If-Unmodified-Since"`
	IfMatch              string     `header:"If-Match"`
	IfNoneMatch          string     `header:"If-None-Match"`
	SequenceNumberAction *SequenceNumberAction // page blobs only; handled specially in SetProperties
	RequestID            string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// SequenceNumberAction defines how the blob's sequence number should be modified.
type SequenceNumberAction string
|
||||||
|
|
||||||
|
// Options for sequence number action
const (
	SequenceNumberActionMax       SequenceNumberAction = "max"
	SequenceNumberActionUpdate    SequenceNumberAction = "update"
	SequenceNumberActionIncrement SequenceNumberAction = "increment"
)
|
||||||
|
|
||||||
|
// SetProperties replaces the BlobHeaders for the specified blob.
|
||||||
|
//
|
||||||
|
// Some keys may be converted to Camel-Case before sending. All keys
|
||||||
|
// are returned in lower case by GetBlobProperties. HTTP header names
|
||||||
|
// are case-insensitive so case munging should not matter to other
|
||||||
|
// applications either.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
|
||||||
|
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
|
||||||
|
params := url.Values{"comp": {"properties"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
if b.Properties.BlobType == BlobTypePage {
|
||||||
|
headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("byte %v", b.Properties.ContentLength))
|
||||||
|
if options != nil || options.SequenceNumberAction != nil {
|
||||||
|
headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
|
||||||
|
if *options.SequenceNumberAction != SequenceNumberActionIncrement {
|
||||||
|
headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBlobMetadataOptions includes the options for a set blob metadata
// operation. Fields carrying a `header` tag are sent verbatim as request
// headers.
type SetBlobMetadataOptions struct {
	Timeout           uint // propagated to the request via addTimeout
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// SetMetadata replaces the metadata for the specified blob.
|
||||||
|
//
|
||||||
|
// Some keys may be converted to Camel-Case before sending. All keys
|
||||||
|
// are returned in lower case by GetBlobMetadata. HTTP header names
|
||||||
|
// are case-insensitive so case munging should not matter to other
|
||||||
|
// applications either.
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||||
|
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
|
||||||
|
params := url.Values{"comp": {"metadata"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlobMetadataOptions includes the options for a get blob metadata
// operation. Fields carrying a `header` tag are sent verbatim as request
// headers; Timeout and Snapshot become query parameters.
type GetBlobMetadataOptions struct {
	Timeout           uint       // propagated to the request via addTimeout
	Snapshot          *time.Time // inspect a specific snapshot instead of the base blob
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// GetMetadata returns all user-defined metadata for the specified blob.
|
||||||
|
//
|
||||||
|
// All metadata keys will be returned in lower case. (HTTP header
|
||||||
|
// names are case-insensitive.)
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||||
|
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
|
||||||
|
params := url.Values{"comp": {"metadata"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
params = addSnapshot(params, options.Snapshot)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.writeMetadata(resp.headers)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Blob) writeMetadata(h http.Header) {
|
||||||
|
metadata := make(map[string]string)
|
||||||
|
for k, v := range h {
|
||||||
|
// Can't trust CanonicalHeaderKey() to munge case
|
||||||
|
// reliably. "_" is allowed in identifiers:
|
||||||
|
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||||
|
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
||||||
|
// http://tools.ietf.org/html/rfc7230#section-3.2
|
||||||
|
// ...but "_" is considered invalid by
|
||||||
|
// CanonicalMIMEHeaderKey in
|
||||||
|
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
||||||
|
// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// metadata["lol"] = content of the last X-Ms-Meta-Lol header
|
||||||
|
k = k[len(userDefinedMetadataHeaderPrefix):]
|
||||||
|
metadata[k] = v[len(v)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
b.Metadata = BlobMetadata(metadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBlobOptions includes the options for a delete blob operation.
// Fields carrying a `header` tag are sent verbatim as request headers;
// DeleteSnapshots is translated to the x-ms-delete-snapshots header
// ("include" when true, "only" when false, omitted when nil).
type DeleteBlobOptions struct {
	Timeout           uint       // propagated to the request via addTimeout
	Snapshot          *time.Time // delete a specific snapshot instead of the base blob
	LeaseID           string     `header:"x-ms-lease-id"`
	DeleteSnapshots   *bool
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// Delete deletes the given blob from the specified container.
|
||||||
|
// If the blob does not exists at the time of the Delete Blob operation, it
|
||||||
|
// returns error.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
|
||||||
|
func (b *Blob) Delete(options *DeleteBlobOptions) error {
|
||||||
|
resp, err := b.delete(options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteIfExists deletes the given blob from the specified container If the
|
||||||
|
// blob is deleted with this call, returns true. Otherwise returns false.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
|
||||||
|
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
|
||||||
|
resp, err := b.delete(options)
|
||||||
|
if resp != nil {
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||||
|
return resp.statusCode == http.StatusAccepted, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
params = addSnapshot(params, options.Snapshot)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
if options.DeleteSnapshots != nil {
|
||||||
|
if *options.DeleteSnapshots {
|
||||||
|
headers["x-ms-delete-snapshots"] = "include"
|
||||||
|
} else {
|
||||||
|
headers["x-ms-delete-snapshots"] = "only"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// pathForResource constructs the request path to a container ("/container")
// or, when name is non-empty, to a blob within it ("/container/name").
func pathForResource(container, name string) string {
	if name == "" {
		return "/" + container
	}
	return "/" + container + "/" + name
}
|
|
@ -0,0 +1,106 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
// If old API version is used but no signedIP is passed (ie empty string) then this should still work.
// We only populate the signedIP when it non-empty.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) {
	var (
		signedPermissions = permissions
		blobURL           = b.GetURL()
	)
	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx

	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	signedExpiry := expiry.UTC().Format(time.RFC3339)

	// If blob name is missing, resource is a container.
	signedResource := "c"
	if len(b.Name) > 0 {
		signedResource = "b"
	}

	protocols := "https,http"
	if HTTPSOnly {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols)
	if err != nil {
		return "", err
	}

	// The signature is an HMAC-SHA256 of the version-specific string-to-sign.
	sig := b.Container.bsc.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {b.Container.bsc.client.apiVersion},
		"se":  {signedExpiry},
		"sr":  {signedResource},
		"sp":  {signedPermissions},
		"sig": {sig},
	}

	// Protocol (spr) and IP range (sip) parameters exist only from API
	// version 2015-04-05 onwards.
	if b.Container.bsc.client.apiVersion >= "2015-04-05" {
		sasParams.Add("spr", protocols)
		if signedIPRange != "" {
			sasParams.Add("sip", signedIPRange)
		}
	}

	sasURL, err := url.Parse(blobURL)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
|
||||||
|
|
||||||
|
// GetSASURI creates an URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// It delegates to GetSASURIWithSignedIPAndProtocol with no IP restriction
// and both HTTP and HTTPS allowed.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) {
	return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false)
}
|
||||||
|
|
||||||
|
// blobSASStringToSign assembles the newline-separated string-to-sign for a
// blob Shared Access Signature according to the API version in use.
// Versions earlier than 2013-08-15 are not supported.
func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) {
	var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string

	// From version 2015-02-21 the canonicalized resource must carry the
	// service name prefix.
	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/blob" + canonicalizedResource
	}

	// 2015-04-05+ layout additionally includes signed IP and protocols:
	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
	if signedVersion >= "2015-04-05" {
		fields := []string{signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct}
		return strings.Join(fields, "\n"), nil
	}

	// 2013-08-15+ layout:
	// http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	if signedVersion >= "2013-08-15" {
		fields := []string{signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct}
		return strings.Join(fields, "\n"), nil
	}

	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
|
|
@ -1,9 +1,9 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
||||||
|
@ -38,9 +38,9 @@ type ListContainersParameters struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetContainerReference returns a Container object for the specified container name.
|
// GetContainerReference returns a Container object for the specified container name.
|
||||||
func (b BlobStorageClient) GetContainerReference(name string) Container {
|
func (b *BlobStorageClient) GetContainerReference(name string) *Container {
|
||||||
return Container{
|
return &Container{
|
||||||
bsc: &b,
|
bsc: b,
|
||||||
Name: name,
|
Name: name,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -61,6 +61,9 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*Con
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.body.Close()
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
err = xmlUnmarshal(resp.body, &out)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// assign our client to the newly created Container objects
|
// assign our client to the newly created Container objects
|
||||||
for i := range out.Containers {
|
for i := range out.Containers {
|
||||||
|
@ -82,10 +85,10 @@ func (p ListContainersParameters) getParameters() url.Values {
|
||||||
out.Set("include", p.Include)
|
out.Set("include", p.Include)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
}
|
||||||
if p.Timeout != 0 {
|
if p.Timeout != 0 {
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
return out
|
return out
|
|
@ -0,0 +1,240 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string
|
||||||
|
|
||||||
|
// Filters for listing blocks in block blobs
const (
	BlockListTypeAll         BlockListType = "all"
	BlockListTypeCommitted   BlockListType = "committed"
	BlockListTypeUncommitted BlockListType = "uncommitted"
)
|
||||||
|
|
||||||
|
// Maximum sizes (per REST API) for various concepts
const (
	MaxBlobBlockSize = 100 * 1024 * 1024 // largest single block in a block blob (100 MiB)
	MaxBlobPageSize  = 4 * 1024 * 1024   // largest single page write in a page blob (4 MiB)
)
|
||||||
|
|
||||||
|
// BlockStatus defines states a block for a block blob can
// be in.
type BlockStatus string
|
||||||
|
|
||||||
|
// List of statuses that can be used to refer to a block in a block list
const (
	BlockStatusUncommitted BlockStatus = "Uncommitted"
	BlockStatusCommitted   BlockStatus = "Committed"
	BlockStatusLatest      BlockStatus = "Latest"
)
|
||||||
|
|
||||||
|
// Block is used to create Block entities for Put Block List
// call.
type Block struct {
	ID     string      // client-chosen block identifier
	Status BlockStatus // which block list (committed/uncommitted/latest) the ID refers to
}
|
||||||
|
|
||||||
|
// BlockListResponse contains the response fields from Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
	XMLName           xml.Name        `xml:"BlockList"`
	CommittedBlocks   []BlockResponse `xml:"CommittedBlocks>Block"`
	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}
|
||||||
|
|
||||||
|
// BlockResponse contains the block information returned
// in the GetBlockListCall.
type BlockResponse struct {
	Name string `xml:"Name"` // block identifier
	Size int64  `xml:"Size"` // block size in bytes
}
|
||||||
|
|
||||||
|
// CreateBlockBlob initializes an empty block blob with no blocks.
// It delegates to CreateBlockBlobFromReader with a nil reader (size 0).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
	return b.CreateBlockBlobFromReader(nil, options)
}
|
||||||
|
|
||||||
|
// CreateBlockBlobFromReader initializes a block blob using data from
|
||||||
|
// reader. Size must be the number of bytes read from reader. To
|
||||||
|
// create an empty blob, use size==0 and reader==nil.
|
||||||
|
//
|
||||||
|
// The API rejects requests with size > 256 MiB (but this limit is not
|
||||||
|
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
|
||||||
|
// PutBlock, and PutBlockList.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
||||||
|
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||||
|
|
||||||
|
headers["Content-Length"] = "0"
|
||||||
|
var n int64
|
||||||
|
var err error
|
||||||
|
if blob != nil {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
n, err = io.Copy(buf, blob)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
blob = buf
|
||||||
|
headers["Content-Length"] = strconv.FormatInt(n, 10)
|
||||||
|
}
|
||||||
|
b.Properties.ContentLength = n
|
||||||
|
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlockOptions includes the options for a put block operation
|
||||||
|
type PutBlockOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
ContentMD5 string `header:"Content-MD5"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlock saves the given data chunk to the specified block blob with
|
||||||
|
// given ID.
|
||||||
|
//
|
||||||
|
// The API rejects chunks larger than 100 MiB (but this limit is not
|
||||||
|
// checked by the SDK).
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
|
||||||
|
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
|
||||||
|
return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlockWithLength saves the given data stream of exactly specified size to
|
||||||
|
// the block blob with given ID. It is an alternative to PutBlocks where data
|
||||||
|
// comes as stream but the length is known in advance.
|
||||||
|
//
|
||||||
|
// The API rejects requests with size > 100 MiB (but this limit is not
|
||||||
|
// checked by the SDK).
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
|
||||||
|
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
|
||||||
|
query := url.Values{
|
||||||
|
"comp": {"block"},
|
||||||
|
"blockid": {blockID},
|
||||||
|
}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["Content-Length"] = fmt.Sprintf("%v", size)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlockListOptions includes the options for a put block list operation
|
||||||
|
type PutBlockListOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlockList saves list of blocks to the specified block blob.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
|
||||||
|
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
|
||||||
|
params := url.Values{"comp": {"blocklist"}}
|
||||||
|
blockListXML := prepareBlockListRequest(blocks)
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlockListOptions includes the options for a get block list operation
|
||||||
|
type GetBlockListOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Snapshot *time.Time
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlockList retrieves list of blocks in the specified block blob.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
|
||||||
|
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"blocklist"},
|
||||||
|
"blocklisttype": {string(blockType)},
|
||||||
|
}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
params = addSnapshot(params, options.Snapshot)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
var out BlockListResponse
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
defer resp.body.Close()
|
||||||
|
|
||||||
|
err = xmlUnmarshal(resp.body, &out)
|
||||||
|
return out, err
|
||||||
|
}
|
|
@ -2,6 +2,7 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
@ -10,12 +11,16 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"mime"
|
||||||
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"regexp"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest"
|
||||||
"github.com/Azure/go-autorest/autorest/azure"
|
"github.com/Azure/go-autorest/autorest/azure"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -46,15 +51,60 @@ const (
|
||||||
storageEmulatorQueue = "127.0.0.1:10001"
|
storageEmulatorQueue = "127.0.0.1:10001"
|
||||||
|
|
||||||
userAgentHeader = "User-Agent"
|
userAgentHeader = "User-Agent"
|
||||||
|
|
||||||
|
userDefinedMetadataHeaderPrefix = "x-ms-meta-"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sender sends a request
|
||||||
|
type Sender interface {
|
||||||
|
Send(*Client, *http.Request) (*http.Response, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultSender is the default sender for the client. It implements
|
||||||
|
// an automatic retry strategy.
|
||||||
|
type DefaultSender struct {
|
||||||
|
RetryAttempts int
|
||||||
|
RetryDuration time.Duration
|
||||||
|
ValidStatusCodes []int
|
||||||
|
attempts int // used for testing
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send is the default retry strategy in the client
|
||||||
|
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
|
||||||
|
rr := autorest.NewRetriableRequest(req)
|
||||||
|
for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
|
||||||
|
err = rr.Prepare()
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
resp, err = c.HTTPClient.Do(rr.Request())
|
||||||
|
if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
|
||||||
|
ds.attempts = attempts
|
||||||
|
}
|
||||||
|
ds.attempts++
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
// Client is the object that needs to be constructed to perform
|
// Client is the object that needs to be constructed to perform
|
||||||
// operations on the storage account.
|
// operations on the storage account.
|
||||||
type Client struct {
|
type Client struct {
|
||||||
// HTTPClient is the http.Client used to initiate API
|
// HTTPClient is the http.Client used to initiate API
|
||||||
// requests. If it is nil, http.DefaultClient is used.
|
// requests. http.DefaultClient is used when creating a
|
||||||
|
// client.
|
||||||
HTTPClient *http.Client
|
HTTPClient *http.Client
|
||||||
|
|
||||||
|
// Sender is an interface that sends the request. Clients are
|
||||||
|
// created with a DefaultSender. The DefaultSender has an
|
||||||
|
// automatic retry strategy built in. The Sender can be customized.
|
||||||
|
Sender Sender
|
||||||
|
|
||||||
accountName string
|
accountName string
|
||||||
accountKey []byte
|
accountKey []byte
|
||||||
useHTTPS bool
|
useHTTPS bool
|
||||||
|
@ -72,7 +122,7 @@ type storageResponse struct {
|
||||||
|
|
||||||
type odataResponse struct {
|
type odataResponse struct {
|
||||||
storageResponse
|
storageResponse
|
||||||
odata odataErrorMessage
|
odata odataErrorWrapper
|
||||||
}
|
}
|
||||||
|
|
||||||
// AzureStorageServiceError contains fields of the error response from
|
// AzureStorageServiceError contains fields of the error response from
|
||||||
|
@ -85,22 +135,25 @@ type AzureStorageServiceError struct {
|
||||||
QueryParameterName string `xml:"QueryParameterName"`
|
QueryParameterName string `xml:"QueryParameterName"`
|
||||||
QueryParameterValue string `xml:"QueryParameterValue"`
|
QueryParameterValue string `xml:"QueryParameterValue"`
|
||||||
Reason string `xml:"Reason"`
|
Reason string `xml:"Reason"`
|
||||||
|
Lang string
|
||||||
StatusCode int
|
StatusCode int
|
||||||
RequestID string
|
RequestID string
|
||||||
|
Date string
|
||||||
|
APIVersion string
|
||||||
}
|
}
|
||||||
|
|
||||||
type odataErrorMessageMessage struct {
|
type odataErrorMessage struct {
|
||||||
Lang string `json:"lang"`
|
Lang string `json:"lang"`
|
||||||
Value string `json:"value"`
|
Value string `json:"value"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type odataErrorMessageInternal struct {
|
type odataError struct {
|
||||||
Code string `json:"code"`
|
Code string `json:"code"`
|
||||||
Message odataErrorMessageMessage `json:"message"`
|
Message odataErrorMessage `json:"message"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type odataErrorMessage struct {
|
type odataErrorWrapper struct {
|
||||||
Err odataErrorMessageInternal `json:"odata.error"`
|
Err odataError `json:"odata.error"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
|
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
|
||||||
|
@ -155,8 +208,8 @@ func NewEmulatorClient() (Client, error) {
|
||||||
// storage endpoint than Azure Public Cloud.
|
// storage endpoint than Azure Public Cloud.
|
||||||
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
|
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
|
||||||
var c Client
|
var c Client
|
||||||
if accountName == "" {
|
if !IsValidStorageAccount(accountName) {
|
||||||
return c, fmt.Errorf("azure: account name required")
|
return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
|
||||||
} else if accountKey == "" {
|
} else if accountKey == "" {
|
||||||
return c, fmt.Errorf("azure: account key required")
|
return c, fmt.Errorf("azure: account key required")
|
||||||
} else if blobServiceBaseURL == "" {
|
} else if blobServiceBaseURL == "" {
|
||||||
|
@ -169,19 +222,37 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u
|
||||||
}
|
}
|
||||||
|
|
||||||
c = Client{
|
c = Client{
|
||||||
|
HTTPClient: http.DefaultClient,
|
||||||
accountName: accountName,
|
accountName: accountName,
|
||||||
accountKey: key,
|
accountKey: key,
|
||||||
useHTTPS: useHTTPS,
|
useHTTPS: useHTTPS,
|
||||||
baseURL: blobServiceBaseURL,
|
baseURL: blobServiceBaseURL,
|
||||||
apiVersion: apiVersion,
|
apiVersion: apiVersion,
|
||||||
UseSharedKeyLite: false,
|
UseSharedKeyLite: false,
|
||||||
|
Sender: &DefaultSender{
|
||||||
|
RetryAttempts: 5,
|
||||||
|
ValidStatusCodes: []int{
|
||||||
|
http.StatusRequestTimeout, // 408
|
||||||
|
http.StatusInternalServerError, // 500
|
||||||
|
http.StatusBadGateway, // 502
|
||||||
|
http.StatusServiceUnavailable, // 503
|
||||||
|
http.StatusGatewayTimeout, // 504
|
||||||
|
},
|
||||||
|
RetryDuration: time.Second * 5,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
c.userAgent = c.getDefaultUserAgent()
|
c.userAgent = c.getDefaultUserAgent()
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsValidStorageAccount checks if the storage account name is valid.
|
||||||
|
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
|
||||||
|
func IsValidStorageAccount(account string) bool {
|
||||||
|
return validStorageAccount.MatchString(account)
|
||||||
|
}
|
||||||
|
|
||||||
func (c Client) getDefaultUserAgent() string {
|
func (c Client) getDefaultUserAgent() string {
|
||||||
return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
|
return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
|
||||||
runtime.Version(),
|
runtime.Version(),
|
||||||
runtime.GOARCH,
|
runtime.GOARCH,
|
||||||
runtime.GOOS,
|
runtime.GOOS,
|
||||||
|
@ -210,7 +281,7 @@ func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]str
|
||||||
return extraheaders
|
return extraheaders
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) getBaseURL(service string) string {
|
func (c Client) getBaseURL(service string) *url.URL {
|
||||||
scheme := "http"
|
scheme := "http"
|
||||||
if c.useHTTPS {
|
if c.useHTTPS {
|
||||||
scheme = "https"
|
scheme = "https"
|
||||||
|
@ -229,18 +300,14 @@ func (c Client) getBaseURL(service string) string {
|
||||||
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
|
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
|
||||||
}
|
}
|
||||||
|
|
||||||
u := &url.URL{
|
return &url.URL{
|
||||||
Scheme: scheme,
|
Scheme: scheme,
|
||||||
Host: host}
|
Host: host,
|
||||||
return u.String()
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) getEndpoint(service, path string, params url.Values) string {
|
func (c Client) getEndpoint(service, path string, params url.Values) string {
|
||||||
u, err := url.Parse(c.getBaseURL(service))
|
u := c.getBaseURL(service)
|
||||||
if err != nil {
|
|
||||||
// really should not be happening
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// API doesn't accept path segments not starting with '/'
|
// API doesn't accept path segments not starting with '/'
|
||||||
if !strings.HasPrefix(path, "/") {
|
if !strings.HasPrefix(path, "/") {
|
||||||
|
@ -331,46 +398,44 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
|
||||||
return nil, errors.New("azure/storage: error creating request: " + err.Error())
|
return nil, errors.New("azure/storage: error creating request: " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if clstr, ok := headers["Content-Length"]; ok {
|
|
||||||
// content length header is being signed, but completely ignored by golang.
|
|
||||||
// instead we have to use the ContentLength property on the request struct
|
|
||||||
// (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
|
|
||||||
// https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
|
|
||||||
req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for k, v := range headers {
|
for k, v := range headers {
|
||||||
req.Header.Add(k, v)
|
req.Header.Add(k, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
httpClient := c.HTTPClient
|
resp, err := c.Sender.Send(&c, req)
|
||||||
if httpClient == nil {
|
|
||||||
httpClient = http.DefaultClient
|
|
||||||
}
|
|
||||||
resp, err := httpClient.Do(req)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
statusCode := resp.StatusCode
|
if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
|
||||||
if statusCode >= 400 && statusCode <= 505 {
|
|
||||||
var respBody []byte
|
var respBody []byte
|
||||||
respBody, err = readAndCloseBody(resp.Body)
|
respBody, err = readAndCloseBody(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
requestID := resp.Header.Get("x-ms-request-id")
|
requestID, date, version := getDebugHeaders(resp.Header)
|
||||||
if len(respBody) == 0 {
|
if len(respBody) == 0 {
|
||||||
// no error in response body, might happen in HEAD requests
|
// no error in response body, might happen in HEAD requests
|
||||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID)
|
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
||||||
} else {
|
} else {
|
||||||
|
storageErr := AzureStorageServiceError{
|
||||||
|
StatusCode: resp.StatusCode,
|
||||||
|
RequestID: requestID,
|
||||||
|
Date: date,
|
||||||
|
APIVersion: version,
|
||||||
|
}
|
||||||
// response contains storage service error object, unmarshal
|
// response contains storage service error object, unmarshal
|
||||||
storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID)
|
if resp.Header.Get("Content-Type") == "application/xml" {
|
||||||
if err != nil { // error unmarshaling the error response
|
errIn := serviceErrFromXML(respBody, &storageErr)
|
||||||
err = errIn
|
if err != nil { // error unmarshaling the error response
|
||||||
|
err = errIn
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
errIn := serviceErrFromJSON(respBody, &storageErr)
|
||||||
|
if err != nil { // error unmarshaling the error response
|
||||||
|
err = errIn
|
||||||
|
}
|
||||||
}
|
}
|
||||||
err = storageErr
|
err = storageErr
|
||||||
}
|
}
|
||||||
|
@ -387,10 +452,10 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
|
||||||
body: resp.Body}, nil
|
body: resp.Body}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
|
||||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequest(verb, url, body)
|
req, err := http.NewRequest(verb, url, body)
|
||||||
|
@ -398,14 +463,9 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
|
||||||
req.Header.Add(k, v)
|
req.Header.Add(k, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
httpClient := c.HTTPClient
|
resp, err := c.Sender.Send(&c, req)
|
||||||
if httpClient == nil {
|
|
||||||
httpClient = http.DefaultClient
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := httpClient.Do(req)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
respToRet := &odataResponse{}
|
respToRet := &odataResponse{}
|
||||||
|
@ -418,22 +478,110 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
|
||||||
var respBody []byte
|
var respBody []byte
|
||||||
respBody, err = readAndCloseBody(resp.Body)
|
respBody, err = readAndCloseBody(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
requestID, date, version := getDebugHeaders(resp.Header)
|
||||||
if len(respBody) == 0 {
|
if len(respBody) == 0 {
|
||||||
// no error in response body, might happen in HEAD requests
|
// no error in response body, might happen in HEAD requests
|
||||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id"))
|
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
||||||
return respToRet, err
|
return respToRet, req, resp, err
|
||||||
}
|
}
|
||||||
// try unmarshal as odata.error json
|
// try unmarshal as odata.error json
|
||||||
err = json.Unmarshal(respBody, &respToRet.odata)
|
err = json.Unmarshal(respBody, &respToRet.odata)
|
||||||
return respToRet, err
|
}
|
||||||
|
|
||||||
|
return respToRet, req, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
||||||
|
respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
|
||||||
|
return respToRet, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
||||||
|
// execute common query, get back generated request, response etc... for more processing.
|
||||||
|
respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// return the OData in the case of executing batch commands.
|
||||||
|
// In this case we need to read the outer batch boundary and contents.
|
||||||
|
// Then we read the changeset information within the batch
|
||||||
|
var respBody []byte
|
||||||
|
respBody, err = readAndCloseBody(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// outer multipart body
|
||||||
|
_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// batch details.
|
||||||
|
batchBoundary := batchHeader["boundary"]
|
||||||
|
batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// changeset details.
|
||||||
|
err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return respToRet, nil
|
return respToRet, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
|
||||||
|
changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
|
||||||
|
changesetPart, err := changesetMultiReader.NextPart()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
changesetPartBufioReader := bufio.NewReader(changesetPart)
|
||||||
|
changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if changesetResp.StatusCode != http.StatusNoContent {
|
||||||
|
changesetBody, err := readAndCloseBody(changesetResp.Body)
|
||||||
|
err = json.Unmarshal(changesetBody, &respToRet.odata)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
respToRet.statusCode = changesetResp.StatusCode
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
|
||||||
|
respBodyString := string(respBody)
|
||||||
|
respBodyReader := strings.NewReader(respBodyString)
|
||||||
|
|
||||||
|
// reading batchresponse
|
||||||
|
batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
|
||||||
|
batchPart, err := batchMultiReader.NextPart()
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
batchPartBufioReader := bufio.NewReader(batchPart)
|
||||||
|
|
||||||
|
_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
changesetBoundary := changesetHeader["boundary"]
|
||||||
|
return batchPartBufioReader, changesetBoundary, nil
|
||||||
|
}
|
||||||
|
|
||||||
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
||||||
defer body.Close()
|
defer body.Close()
|
||||||
out, err := ioutil.ReadAll(body)
|
out, err := ioutil.ReadAll(body)
|
||||||
|
@ -443,28 +591,40 @@ func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
||||||
return out, err
|
return out, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
|
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
|
||||||
var storageErr AzureStorageServiceError
|
if err := xml.Unmarshal(body, storageErr); err != nil {
|
||||||
if err := xml.Unmarshal(body, &storageErr); err != nil {
|
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
|
||||||
return storageErr, err
|
return err
|
||||||
}
|
}
|
||||||
storageErr.StatusCode = statusCode
|
return nil
|
||||||
storageErr.RequestID = requestID
|
|
||||||
return storageErr, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
|
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
|
||||||
|
odataError := odataErrorWrapper{}
|
||||||
|
if err := json.Unmarshal(body, &odataError); err != nil {
|
||||||
|
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
storageErr.Code = odataError.Err.Code
|
||||||
|
storageErr.Message = odataError.Err.Message.Value
|
||||||
|
storageErr.Lang = odataError.Err.Message.Lang
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
|
||||||
return AzureStorageServiceError{
|
return AzureStorageServiceError{
|
||||||
StatusCode: code,
|
StatusCode: code,
|
||||||
Code: status,
|
Code: status,
|
||||||
RequestID: requestID,
|
RequestID: requestID,
|
||||||
|
Date: date,
|
||||||
|
APIVersion: version,
|
||||||
Message: "no response body was available for error status code",
|
Message: "no response body was available for error status code",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e AzureStorageServiceError) Error() string {
|
func (e AzureStorageServiceError) Error() string {
|
||||||
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
|
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
|
||||||
e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
|
e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkRespCode returns UnexpectedStatusError if the given response code is not
|
// checkRespCode returns UnexpectedStatusError if the given response code is not
|
||||||
|
@ -477,3 +637,18 @@ func checkRespCode(respCode int, allowed []int) error {
|
||||||
}
|
}
|
||||||
return UnexpectedStatusCodeError{allowed, respCode}
|
return UnexpectedStatusCodeError{allowed, respCode}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
|
||||||
|
metadata = c.protectUserAgent(metadata)
|
||||||
|
for k, v := range metadata {
|
||||||
|
h[userDefinedMetadataHeaderPrefix+k] = v
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDebugHeaders(h http.Header) (requestID, date, version string) {
|
||||||
|
requestID = h.Get("x-ms-request-id")
|
||||||
|
version = h.Get("x-ms-version")
|
||||||
|
date = h.Get("Date")
|
||||||
|
return
|
||||||
|
}
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -16,6 +17,7 @@ type Container struct {
|
||||||
bsc *BlobStorageClient
|
bsc *BlobStorageClient
|
||||||
Name string `xml:"Name"`
|
Name string `xml:"Name"`
|
||||||
Properties ContainerProperties `xml:"Properties"`
|
Properties ContainerProperties `xml:"Properties"`
|
||||||
|
Metadata map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) buildPath() string {
|
func (c *Container) buildPath() string {
|
||||||
|
@ -69,6 +71,14 @@ type BlobListResponse struct {
|
||||||
Delimiter string `xml:"Delimiter"`
|
Delimiter string `xml:"Delimiter"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IncludeBlobDataset has options to include in a list blobs operation
|
||||||
|
type IncludeBlobDataset struct {
|
||||||
|
Snapshots bool
|
||||||
|
Metadata bool
|
||||||
|
UncommittedBlobs bool
|
||||||
|
Copy bool
|
||||||
|
}
|
||||||
|
|
||||||
// ListBlobsParameters defines the set of customizable
|
// ListBlobsParameters defines the set of customizable
|
||||||
// parameters to make a List Blobs call.
|
// parameters to make a List Blobs call.
|
||||||
//
|
//
|
||||||
|
@ -77,9 +87,10 @@ type ListBlobsParameters struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
Delimiter string
|
Delimiter string
|
||||||
Marker string
|
Marker string
|
||||||
Include string
|
Include *IncludeBlobDataset
|
||||||
MaxResults uint
|
MaxResults uint
|
||||||
Timeout uint
|
Timeout uint
|
||||||
|
RequestID string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p ListBlobsParameters) getParameters() url.Values {
|
func (p ListBlobsParameters) getParameters() url.Values {
|
||||||
|
@ -94,19 +105,32 @@ func (p ListBlobsParameters) getParameters() url.Values {
|
||||||
if p.Marker != "" {
|
if p.Marker != "" {
|
||||||
out.Set("marker", p.Marker)
|
out.Set("marker", p.Marker)
|
||||||
}
|
}
|
||||||
if p.Include != "" {
|
if p.Include != nil {
|
||||||
out.Set("include", p.Include)
|
include := []string{}
|
||||||
|
include = addString(include, p.Include.Snapshots, "snapshots")
|
||||||
|
include = addString(include, p.Include.Metadata, "metadata")
|
||||||
|
include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
|
||||||
|
include = addString(include, p.Include.Copy, "copy")
|
||||||
|
fullInclude := strings.Join(include, ",")
|
||||||
|
out.Set("include", fullInclude)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
}
|
||||||
if p.Timeout != 0 {
|
if p.Timeout != 0 {
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// addString appends text to datasets when include is true and returns the
// (possibly extended) slice unchanged otherwise. It is used to assemble the
// comma-separated "include" query parameter for List Blobs.
func addString(datasets []string, include bool, text string) []string {
	if !include {
		return datasets
	}
	return append(datasets, text)
}
|
||||||
|
|
||||||
// ContainerAccessType defines the access level to the container from a public
|
// ContainerAccessType defines the access level to the container from a public
|
||||||
// request.
|
// request.
|
||||||
//
|
//
|
||||||
|
@ -142,23 +166,38 @@ const (
|
||||||
ContainerAccessHeader string = "x-ms-blob-public-access"
|
ContainerAccessHeader string = "x-ms-blob-public-access"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// GetBlobReference returns a Blob object for the specified blob name.
|
||||||
|
func (c *Container) GetBlobReference(name string) *Blob {
|
||||||
|
return &Blob{
|
||||||
|
Container: c,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateContainerOptions includes the options for a create container operation
|
||||||
|
type CreateContainerOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Access ContainerAccessType `header:"x-ms-blob-public-access"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
// Create creates a blob container within the storage account
|
// Create creates a blob container within the storage account
|
||||||
// with given name and access level. Returns error if container already exists.
|
// with given name and access level. Returns error if container already exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
|
||||||
func (c *Container) Create() error {
|
func (c *Container) Create(options *CreateContainerOptions) error {
|
||||||
resp, err := c.create()
|
resp, err := c.create(options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
readAndCloseBody(resp.body)
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
||||||
// true if container is newly created or false if container already exists.
|
// true if container is newly created or false if container already exists.
|
||||||
func (c *Container) CreateIfNotExists() (bool, error) {
|
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
|
||||||
resp, err := c.create()
|
resp, err := c.create(options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||||
|
@ -168,9 +207,17 @@ func (c *Container) CreateIfNotExists() (bool, error) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) create() (*storageResponse, error) {
|
func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) {
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
query := url.Values{"restype": {"container"}}
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
|
||||||
|
|
||||||
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -190,30 +237,36 @@ func (c *Container) Exists() (bool, error) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
|
// SetContainerPermissionOptions includes options for a set container permissions operation
|
||||||
func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error {
|
type SetContainerPermissionOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPermissions sets up container permissions
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
|
||||||
|
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
|
||||||
|
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
params := url.Values{
|
params := url.Values{
|
||||||
"restype": {"container"},
|
"restype": {"container"},
|
||||||
"comp": {"acl"},
|
"comp": {"acl"},
|
||||||
}
|
}
|
||||||
|
|
||||||
if timeout > 0 {
|
|
||||||
params.Add("timeout", strconv.Itoa(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
if permissions.AccessType != "" {
|
headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
|
||||||
headers[ContainerAccessHeader] = string(permissions.AccessType)
|
|
||||||
}
|
|
||||||
|
|
||||||
if leaseID != "" {
|
|
||||||
headers[headerLeaseID] = leaseID
|
|
||||||
}
|
|
||||||
|
|
||||||
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
|
|
||||||
headers["Content-Length"] = strconv.Itoa(length)
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -227,25 +280,28 @@ func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetContainerPermissionOptions includes options for a get container permissions operation
|
||||||
|
type GetContainerPermissionOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
||||||
// If timeout is 0 then it will not be passed to Azure
|
// If timeout is 0 then it will not be passed to Azure
|
||||||
// leaseID will only be passed to Azure if populated
|
// leaseID will only be passed to Azure if populated
|
||||||
func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) {
|
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
|
||||||
params := url.Values{
|
params := url.Values{
|
||||||
"restype": {"container"},
|
"restype": {"container"},
|
||||||
"comp": {"acl"},
|
"comp": {"acl"},
|
||||||
}
|
}
|
||||||
|
|
||||||
if timeout > 0 {
|
|
||||||
params.Add("timeout", strconv.Itoa(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
if leaseID != "" {
|
if options != nil {
|
||||||
headers[headerLeaseID] = leaseID
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
}
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -284,16 +340,25 @@ func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissi
|
||||||
return &permissions
|
return &permissions
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteContainerOptions includes options for a delete container operation
|
||||||
|
type DeleteContainerOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
// Delete deletes the container with given name on the storage
|
// Delete deletes the container with given name on the storage
|
||||||
// account. If the container does not exist returns error.
|
// account. If the container does not exist returns error.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
|
||||||
func (c *Container) Delete() error {
|
func (c *Container) Delete(options *DeleteContainerOptions) error {
|
||||||
resp, err := c.delete()
|
resp, err := c.delete(options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
readAndCloseBody(resp.body)
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -302,9 +367,9 @@ func (c *Container) Delete() error {
|
||||||
// false if the container did not exist at the time of the Delete Container
|
// false if the container did not exist at the time of the Delete Container
|
||||||
// operation.
|
// operation.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
|
||||||
func (c *Container) DeleteIfExists() (bool, error) {
|
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
|
||||||
resp, err := c.delete()
|
resp, err := c.delete(options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||||
|
@ -314,23 +379,32 @@ func (c *Container) DeleteIfExists() (bool, error) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) delete() (*storageResponse, error) {
|
func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) {
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
query := url.Values{"restype": {"container"}}
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
|
||||||
|
|
||||||
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListBlobs returns an object that contains list of blobs in the container,
|
// ListBlobs returns an object that contains list of blobs in the container,
|
||||||
// pagination token and other information in the response of List Blobs call.
|
// pagination token and other information in the response of List Blobs call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
|
||||||
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
||||||
q := mergeParams(params.getParameters(), url.Values{
|
q := mergeParams(params.getParameters(), url.Values{
|
||||||
"restype": {"container"},
|
"restype": {"container"},
|
||||||
"comp": {"list"}},
|
"comp": {"list"}},
|
||||||
)
|
)
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
||||||
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)
|
||||||
|
|
||||||
var out BlobListResponse
|
var out BlobListResponse
|
||||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||||
|
@ -340,6 +414,9 @@ func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, err
|
||||||
defer resp.body.Close()
|
defer resp.body.Close()
|
||||||
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
err = xmlUnmarshal(resp.body, &out)
|
||||||
|
for i := range out.Blobs {
|
||||||
|
out.Blobs[i].Container = c
|
||||||
|
}
|
||||||
return out, err
|
return out, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,223 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copy-status values reported by the service in the blob's CopyStatus
// property while an asynchronous blob copy is in flight. WaitForCopy
// switches on these to decide whether to keep polling or stop.
const (
	blobCopyStatusPending = "pending"
	blobCopyStatusSuccess = "success"
	blobCopyStatusAborted = "aborted"
	blobCopyStatusFailed  = "failed"
)
|
||||||
|
|
||||||
|
// CopyOptions includes the options for a copy blob operation.
type CopyOptions struct {
	// Timeout is forwarded to the request query via addTimeout; 0 omits it.
	Timeout uint
	// Source holds conditions evaluated against the source blob; StartCopy
	// maps them to the x-ms-source-* request headers.
	Source CopyOptionsConditions
	// Destiny holds conditions evaluated against the destination blob
	// (mapped to x-ms-lease-id / x-ms-if-* headers).
	// NOTE(review): "Destiny" looks like a typo for "Destination", but it is
	// exported API and must be kept for backward compatibility.
	Destiny CopyOptionsConditions
	// RequestID is sent as x-ms-client-request-id for request tracing.
	RequestID string
}
|
||||||
|
|
||||||
|
// IncrementalCopyOptions includes the options for an incremental copy blob
// operation.
type IncrementalCopyOptions struct {
	// Timeout is forwarded to the request query via addTimeout; 0 omits it.
	Timeout uint
	// Destination holds conditions evaluated against the destination blob
	// (mapped to the x-ms-if-* request headers by IncrementalCopyBlob).
	Destination IncrementalCopyOptionsConditions
	// RequestID is sent as x-ms-client-request-id for request tracing.
	RequestID string
}
|
||||||
|
|
||||||
|
// CopyOptionsConditions includes some conditional options in a copy blob
// operation.
type CopyOptionsConditions struct {
	// LeaseID of an active lease on the blob, if any.
	LeaseID string
	// Last-modified-time conditions; nil presumably omits the header
	// (added via addTimeToHeaders — confirm its empty-value handling).
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	// ETag conditions; empty strings presumably omit the header
	// (added via addToHeaders — confirm its empty-value handling).
	IfMatch     string
	IfNoneMatch string
}
|
||||||
|
|
||||||
|
// IncrementalCopyOptionsConditions includes some conditional options in a
// copy blob operation.
type IncrementalCopyOptionsConditions struct {
	// Last-modified-time conditions; nil presumably omits the header
	// (added via addTimeToHeaders — confirm its empty-value handling).
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	// ETag conditions; empty strings presumably omit the header.
	IfMatch     string
	IfNoneMatch string
}
|
||||||
|
|
||||||
|
// Copy starts a blob copy operation and waits for the operation to
|
||||||
|
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
|
||||||
|
// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore
|
||||||
|
// this helper method works faster on smaller files.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
||||||
|
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
|
||||||
|
copyID, err := b.StartCopy(sourceBlob, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.WaitForCopy(copyID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartCopy starts an asynchronous blob copy operation and returns the
// service-assigned copy ID, which can be passed to WaitForCopy or AbortCopy.
// sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using GetBlobURL method.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-copy-source"] = sourceBlob
	// Metadata currently set on b is sent along as the destination metadata.
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
		// Conditions evaluated against the SOURCE blob.
		headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
		headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
		headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
		// Conditions evaluated against the DESTINATION blob (the "Destiny"
		// field name is an established typo kept for API compatibility).
		headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
		headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}
	defer readAndCloseBody(resp.body)

	// The service answers 202 Accepted (or 201 Created) when the copy is queued.
	if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
		return "", err
	}

	copyID := resp.headers.Get("x-ms-copy-id")
	if copyID == "" {
		return "", errors.New("Got empty copy id header")
	}
	return copyID, nil
}
|
||||||
|
|
||||||
|
// AbortCopyOptions includes the options for an abort blob operation.
// Tagged fields are turned into request headers via headersFromStruct.
type AbortCopyOptions struct {
	// Timeout is forwarded to the request query via addTimeout; 0 omits it.
	Timeout uint
	// LeaseID of an active lease on the destination blob, required if the
	// destination holds one.
	LeaseID string `header:"x-ms-lease-id"`
	// RequestID is sent for request tracing.
	RequestID string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
|
||||||
|
// copyID is generated from StartBlobCopy function.
|
||||||
|
// currentLeaseID is required IF the destination blob has an active lease on it.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
|
||||||
|
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"copy"},
|
||||||
|
"copyid": {copyID},
|
||||||
|
}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-copy-action"] = "abort"
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
|
||||||
|
func (b *Blob) WaitForCopy(copyID string) error {
|
||||||
|
for {
|
||||||
|
err := b.GetProperties(nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Properties.CopyID != copyID {
|
||||||
|
return errBlobCopyIDMismatch
|
||||||
|
}
|
||||||
|
|
||||||
|
switch b.Properties.CopyStatus {
|
||||||
|
case blobCopyStatusSuccess:
|
||||||
|
return nil
|
||||||
|
case blobCopyStatusPending:
|
||||||
|
continue
|
||||||
|
case blobCopyStatusAborted:
|
||||||
|
return errBlobCopyAborted
|
||||||
|
case blobCopyStatusFailed:
|
||||||
|
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob
|
||||||
|
// sourceBlob parameter must be a valid snapshot URL of the original blob.
|
||||||
|
// THe original blob mut be public, or use a Shared Access Signature.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
|
||||||
|
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
|
||||||
|
params := url.Values{"comp": {"incrementalcopy"}}
|
||||||
|
|
||||||
|
// need formatting to 7 decimal places so it's friendly to Windows and *nix
|
||||||
|
snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
|
||||||
|
u, err := url.Parse(sourceBlobURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
query := u.Query()
|
||||||
|
query.Add("snapshot", snapshotTimeFormatted)
|
||||||
|
encodedQuery := query.Encode()
|
||||||
|
encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
|
||||||
|
u.RawQuery = encodedQuery
|
||||||
|
snapshotURL := u.String()
|
||||||
|
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-copy-source"] = snapshotURL
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
addTimeout(params, options.Timeout)
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
|
||||||
|
headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
|
||||||
|
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// get URI of destination blob
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
copyID := resp.headers.Get("x-ms-copy-id")
|
||||||
|
if copyID == "" {
|
||||||
|
return "", errors.New("Got empty copy id header")
|
||||||
|
}
|
||||||
|
return copyID, nil
|
||||||
|
}
|
|
@ -25,8 +25,9 @@ type DirectoryProperties struct {
|
||||||
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
||||||
// make a List Files and Directories call.
|
// make a List Files and Directories call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||||
type ListDirsAndFilesParameters struct {
|
type ListDirsAndFilesParameters struct {
|
||||||
|
Prefix string
|
||||||
Marker string
|
Marker string
|
||||||
MaxResults uint
|
MaxResults uint
|
||||||
Timeout uint
|
Timeout uint
|
||||||
|
@ -35,7 +36,7 @@ type ListDirsAndFilesParameters struct {
|
||||||
// DirsAndFilesListResponse contains the response fields from
|
// DirsAndFilesListResponse contains the response fields from
|
||||||
// a List Files and Directories call.
|
// a List Files and Directories call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||||
type DirsAndFilesListResponse struct {
|
type DirsAndFilesListResponse struct {
|
||||||
XMLName xml.Name `xml:"EnumerationResults"`
|
XMLName xml.Name `xml:"EnumerationResults"`
|
||||||
Xmlns string `xml:"xmlns,attr"`
|
Xmlns string `xml:"xmlns,attr"`
|
||||||
|
@ -60,14 +61,15 @@ func (d *Directory) buildPath() string {
|
||||||
// Create this directory in the associated share.
|
// Create this directory in the associated share.
|
||||||
// If a directory with the same name already exists, the operation fails.
|
// If a directory with the same name already exists, the operation fails.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
||||||
func (d *Directory) Create() error {
|
func (d *Directory) Create(options *FileRequestOptions) error {
|
||||||
// if this is the root directory exit early
|
// if this is the root directory exit early
|
||||||
if d.parent == nil {
|
if d.parent == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
params := prepareOptions(options)
|
||||||
|
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -80,14 +82,15 @@ func (d *Directory) Create() error {
|
||||||
// directory does not exists. Returns true if the directory is newly created or
|
// directory does not exists. Returns true if the directory is newly created or
|
||||||
// false if the directory already exists.
|
// false if the directory already exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
||||||
func (d *Directory) CreateIfNotExists() (bool, error) {
|
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
|
||||||
// if this is the root directory exit early
|
// if this is the root directory exit early
|
||||||
if d.parent == nil {
|
if d.parent == nil {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil)
|
params := prepareOptions(options)
|
||||||
|
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||||
|
@ -96,7 +99,7 @@ func (d *Directory) CreateIfNotExists() (bool, error) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return false, d.FetchAttributes()
|
return false, d.FetchAttributes(nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -106,16 +109,16 @@ func (d *Directory) CreateIfNotExists() (bool, error) {
|
||||||
// Delete removes this directory. It must be empty in order to be deleted.
|
// Delete removes this directory. It must be empty in order to be deleted.
|
||||||
// If the directory does not exist the operation fails.
|
// If the directory does not exist the operation fails.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
||||||
func (d *Directory) Delete() error {
|
func (d *Directory) Delete(options *FileRequestOptions) error {
|
||||||
return d.fsc.deleteResource(d.buildPath(), resourceDirectory)
|
return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteIfExists removes this directory if it exists.
|
// DeleteIfExists removes this directory if it exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
||||||
func (d *Directory) DeleteIfExists() (bool, error) {
|
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
|
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||||
|
@ -135,8 +138,10 @@ func (d *Directory) Exists() (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchAttributes retrieves metadata for this directory.
|
// FetchAttributes retrieves metadata for this directory.
|
||||||
func (d *Directory) FetchAttributes() error {
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
|
||||||
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead)
|
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -170,7 +175,7 @@ func (d *Directory) GetFileReference(name string) *File {
|
||||||
// ListDirsAndFiles returns a list of files and directories under this directory.
|
// ListDirsAndFiles returns a list of files and directories under this directory.
|
||||||
// It also contains a pagination token and other response details.
|
// It also contains a pagination token and other response details.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||||
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
||||||
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
||||||
|
|
||||||
|
@ -192,9 +197,9 @@ func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAn
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
|
||||||
func (d *Directory) SetMetadata() error {
|
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
|
||||||
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
|
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
|
@ -0,0 +1,439 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/satori/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Annotating as secure for gas scanning
|
||||||
|
/* #nosec */
|
||||||
|
const (
|
||||||
|
partitionKeyNode = "PartitionKey"
|
||||||
|
rowKeyNode = "RowKey"
|
||||||
|
etagErrorTemplate = "Etag didn't match: %v"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation")
|
||||||
|
errNilPreviousResult = errors.New("The previous results page is nil")
|
||||||
|
errNilNextLink = errors.New("There are no more pages in this query results")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Entity represents an entity inside an Azure table.
|
||||||
|
type Entity struct {
|
||||||
|
Table *Table
|
||||||
|
PartitionKey string
|
||||||
|
RowKey string
|
||||||
|
TimeStamp time.Time
|
||||||
|
OdataMetadata string
|
||||||
|
OdataType string
|
||||||
|
OdataID string
|
||||||
|
OdataEtag string
|
||||||
|
OdataEditLink string
|
||||||
|
Properties map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEntityReference returns an Entity object with the specified
|
||||||
|
// partition key and row key.
|
||||||
|
func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
|
||||||
|
return &Entity{
|
||||||
|
PartitionKey: partitionKey,
|
||||||
|
RowKey: rowKey,
|
||||||
|
Table: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EntityOptions includes options for entity operations.
|
||||||
|
type EntityOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEntityOptions includes options for a get entity operation
|
||||||
|
type GetEntityOptions struct {
|
||||||
|
Select []string
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the referenced entity. Which properties to get can be
|
||||||
|
// specified using the select option.
|
||||||
|
// See:
|
||||||
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
|
||||||
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
|
||||||
|
func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
|
||||||
|
if ml == EmptyPayload {
|
||||||
|
return errEmptyPayload
|
||||||
|
}
|
||||||
|
// RowKey and PartitionKey could be lost if not included in the query
|
||||||
|
// As those are the entity identifiers, it is best if they are not lost
|
||||||
|
rk := e.RowKey
|
||||||
|
pk := e.PartitionKey
|
||||||
|
|
||||||
|
query := url.Values{
|
||||||
|
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
|
||||||
|
}
|
||||||
|
headers := e.Table.tsc.client.getStandardHeaders()
|
||||||
|
headers[headerAccept] = string(ml)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
if len(options.Select) > 0 {
|
||||||
|
query.Add("$select", strings.Join(options.Select, ","))
|
||||||
|
}
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
||||||
|
resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
respBody, err := ioutil.ReadAll(resp.body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(respBody, e)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e.PartitionKey = pk
|
||||||
|
e.RowKey = rk
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert inserts the referenced entity in its table.
|
||||||
|
// The function fails if there is an entity with the same
|
||||||
|
// PartitionKey and RowKey in the table.
|
||||||
|
// ml determines the level of detail of metadata in the operation response,
|
||||||
|
// or no data at all.
|
||||||
|
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
|
||||||
|
func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
|
||||||
|
query, headers := options.getParameters()
|
||||||
|
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
|
||||||
|
|
||||||
|
body, err := json.Marshal(e)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers = addBodyRelatedHeaders(headers, len(body))
|
||||||
|
headers = addReturnContentHeaders(headers, ml)
|
||||||
|
|
||||||
|
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
|
||||||
|
resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.body.Close()
|
||||||
|
|
||||||
|
data, err := ioutil.ReadAll(resp.body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml != EmptyPayload {
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = e.UnmarshalJSON(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update updates the contents of an entity. The function fails if there is no entity
|
||||||
|
// with the same PartitionKey and RowKey in the table or if the ETag is different
|
||||||
|
// than the one in Azure.
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
|
||||||
|
func (e *Entity) Update(force bool, options *EntityOptions) error {
|
||||||
|
return e.updateMerge(force, http.MethodPut, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge merges the contents of entity specified with PartitionKey and RowKey
|
||||||
|
// with the content specified in Properties.
|
||||||
|
// The function fails if there is no entity with the same PartitionKey and
|
||||||
|
// RowKey in the table or if the ETag is different than the one in Azure.
|
||||||
|
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
|
||||||
|
func (e *Entity) Merge(force bool, options *EntityOptions) error {
|
||||||
|
return e.updateMerge(force, "MERGE", options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes the entity.
|
||||||
|
// The function fails if there is no entity with the same PartitionKey and
|
||||||
|
// RowKey in the table or if the ETag is different than the one in Azure.
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
|
||||||
|
func (e *Entity) Delete(force bool, options *EntityOptions) error {
|
||||||
|
query, headers := options.getParameters()
|
||||||
|
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
|
||||||
|
|
||||||
|
headers = addIfMatchHeader(headers, force, e.OdataEtag)
|
||||||
|
headers = addReturnContentHeaders(headers, EmptyPayload)
|
||||||
|
|
||||||
|
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
||||||
|
resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
if resp.statusCode == http.StatusPreconditionFailed {
|
||||||
|
return fmt.Errorf(etagErrorTemplate, err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.updateTimestamp(resp.headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrReplace inserts an entity or replaces the existing one.
|
||||||
|
// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
|
||||||
|
func (e *Entity) InsertOrReplace(options *EntityOptions) error {
|
||||||
|
return e.insertOr(http.MethodPut, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrMerge inserts an entity or merges the existing one.
|
||||||
|
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
|
||||||
|
func (e *Entity) InsertOrMerge(options *EntityOptions) error {
|
||||||
|
return e.insertOr("MERGE", options)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Entity) buildPath() string {
|
||||||
|
return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON is a custom marshaller for entity
|
||||||
|
func (e *Entity) MarshalJSON() ([]byte, error) {
|
||||||
|
completeMap := map[string]interface{}{}
|
||||||
|
completeMap[partitionKeyNode] = e.PartitionKey
|
||||||
|
completeMap[rowKeyNode] = e.RowKey
|
||||||
|
for k, v := range e.Properties {
|
||||||
|
typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
|
||||||
|
switch t := v.(type) {
|
||||||
|
case []byte:
|
||||||
|
completeMap[typeKey] = OdataBinary
|
||||||
|
completeMap[k] = string(t)
|
||||||
|
case time.Time:
|
||||||
|
completeMap[typeKey] = OdataDateTime
|
||||||
|
completeMap[k] = t.Format(time.RFC3339Nano)
|
||||||
|
case uuid.UUID:
|
||||||
|
completeMap[typeKey] = OdataGUID
|
||||||
|
completeMap[k] = t.String()
|
||||||
|
case int64:
|
||||||
|
completeMap[typeKey] = OdataInt64
|
||||||
|
completeMap[k] = fmt.Sprintf("%v", v)
|
||||||
|
default:
|
||||||
|
completeMap[k] = v
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(k, OdataTypeSuffix) {
|
||||||
|
if !(completeMap[k] == OdataBinary ||
|
||||||
|
completeMap[k] == OdataDateTime ||
|
||||||
|
completeMap[k] == OdataGUID ||
|
||||||
|
completeMap[k] == OdataInt64) {
|
||||||
|
return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
|
||||||
|
}
|
||||||
|
valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
|
||||||
|
if _, ok := completeMap[valueKey]; !ok {
|
||||||
|
return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return json.Marshal(completeMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON is a custom unmarshaller for entities
|
||||||
|
func (e *Entity) UnmarshalJSON(data []byte) error {
|
||||||
|
errorTemplate := "Deserializing error: %v"
|
||||||
|
|
||||||
|
props := map[string]interface{}{}
|
||||||
|
err := json.Unmarshal(data, &props)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// deselialize metadata
|
||||||
|
e.OdataMetadata = stringFromMap(props, "odata.metadata")
|
||||||
|
e.OdataType = stringFromMap(props, "odata.type")
|
||||||
|
e.OdataID = stringFromMap(props, "odata.id")
|
||||||
|
e.OdataEtag = stringFromMap(props, "odata.etag")
|
||||||
|
e.OdataEditLink = stringFromMap(props, "odata.editLink")
|
||||||
|
e.PartitionKey = stringFromMap(props, partitionKeyNode)
|
||||||
|
e.RowKey = stringFromMap(props, rowKeyNode)
|
||||||
|
|
||||||
|
// deserialize timestamp
|
||||||
|
timeStamp, ok := props["Timestamp"]
|
||||||
|
if ok {
|
||||||
|
str, ok := timeStamp.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(errorTemplate, "Timestamp casting error")
|
||||||
|
}
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, str)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(errorTemplate, err)
|
||||||
|
}
|
||||||
|
e.TimeStamp = t
|
||||||
|
}
|
||||||
|
delete(props, "Timestamp")
|
||||||
|
delete(props, "Timestamp@odata.type")
|
||||||
|
|
||||||
|
// deserialize entity (user defined fields)
|
||||||
|
for k, v := range props {
|
||||||
|
if strings.HasSuffix(k, OdataTypeSuffix) {
|
||||||
|
valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
|
||||||
|
str, ok := props[valueKey].(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
|
||||||
|
}
|
||||||
|
switch v {
|
||||||
|
case OdataBinary:
|
||||||
|
props[valueKey] = []byte(str)
|
||||||
|
case OdataDateTime:
|
||||||
|
t, err := time.Parse("2006-01-02T15:04:05Z", str)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(errorTemplate, err)
|
||||||
|
}
|
||||||
|
props[valueKey] = t
|
||||||
|
case OdataGUID:
|
||||||
|
props[valueKey] = uuid.FromStringOrNil(str)
|
||||||
|
case OdataInt64:
|
||||||
|
i, err := strconv.ParseInt(str, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(errorTemplate, err)
|
||||||
|
}
|
||||||
|
props[valueKey] = i
|
||||||
|
default:
|
||||||
|
return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
|
||||||
|
}
|
||||||
|
delete(props, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Properties = props
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAndDelete(props map[string]interface{}, key string) interface{} {
|
||||||
|
if value, ok := props[key]; ok {
|
||||||
|
delete(props, key)
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
|
||||||
|
if force {
|
||||||
|
h[headerIfMatch] = "*"
|
||||||
|
} else {
|
||||||
|
h[headerIfMatch] = etag
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
// updates Etag and timestamp
|
||||||
|
func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
|
||||||
|
e.OdataEtag = headers.Get(headerEtag)
|
||||||
|
return e.updateTimestamp(headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Entity) updateTimestamp(headers http.Header) error {
|
||||||
|
str := headers.Get(headerDate)
|
||||||
|
t, err := time.Parse(time.RFC1123, str)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Update timestamp error: %v", err)
|
||||||
|
}
|
||||||
|
e.TimeStamp = t
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Entity) insertOr(verb string, options *EntityOptions) error {
|
||||||
|
query, headers := options.getParameters()
|
||||||
|
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
|
||||||
|
|
||||||
|
body, err := json.Marshal(e)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers = addBodyRelatedHeaders(headers, len(body))
|
||||||
|
headers = addReturnContentHeaders(headers, EmptyPayload)
|
||||||
|
|
||||||
|
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
||||||
|
resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.updateEtagAndTimestamp(resp.headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
|
||||||
|
query, headers := options.getParameters()
|
||||||
|
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
|
||||||
|
|
||||||
|
body, err := json.Marshal(e)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers = addBodyRelatedHeaders(headers, len(body))
|
||||||
|
headers = addIfMatchHeader(headers, force, e.OdataEtag)
|
||||||
|
headers = addReturnContentHeaders(headers, EmptyPayload)
|
||||||
|
|
||||||
|
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
||||||
|
resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
if resp.statusCode == http.StatusPreconditionFailed {
|
||||||
|
return fmt.Errorf(etagErrorTemplate, err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.updateEtagAndTimestamp(resp.headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringFromMap(props map[string]interface{}, key string) string {
|
||||||
|
value := getAndDelete(props, key)
|
||||||
|
if value != nil {
|
||||||
|
return value.(string)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
|
||||||
|
query := url.Values{}
|
||||||
|
headers := map[string]string{}
|
||||||
|
if options != nil {
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = headersFromStruct(*options)
|
||||||
|
}
|
||||||
|
return query, headers
|
||||||
|
}
|
|
@ -32,7 +32,7 @@ type FileProperties struct {
|
||||||
Etag string
|
Etag string
|
||||||
Language string `header:"x-ms-content-language"`
|
Language string `header:"x-ms-content-language"`
|
||||||
LastModified string
|
LastModified string
|
||||||
Length uint64 `xml:"Content-Length"`
|
Length uint64 `xml:"Content-Length" header:"x-ms-content-length"`
|
||||||
MD5 string `header:"x-ms-content-md5"`
|
MD5 string `header:"x-ms-content-md5"`
|
||||||
Type string `header:"x-ms-content-type"`
|
Type string `header:"x-ms-content-type"`
|
||||||
}
|
}
|
||||||
|
@ -54,26 +54,22 @@ type FileStream struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileRequestOptions will be passed to misc file operations.
|
// FileRequestOptions will be passed to misc file operations.
|
||||||
// Currently just Timeout (in seconds) but will expand.
|
// Currently just Timeout (in seconds) but could expand.
|
||||||
type FileRequestOptions struct {
|
type FileRequestOptions struct {
|
||||||
Timeout uint // timeout duration in seconds.
|
Timeout uint // timeout duration in seconds.
|
||||||
}
|
}
|
||||||
|
|
||||||
// getParameters, construct parameters for FileRequestOptions.
|
func prepareOptions(options *FileRequestOptions) url.Values {
|
||||||
// currently only timeout, but expecting to grow as functionality fills out.
|
params := url.Values{}
|
||||||
func (p FileRequestOptions) getParameters() url.Values {
|
if options != nil {
|
||||||
out := url.Values{}
|
params = addTimeout(params, options.Timeout)
|
||||||
|
|
||||||
if p.Timeout != 0 {
|
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
|
||||||
}
|
}
|
||||||
|
return params
|
||||||
return out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileRanges contains a list of file range information for a file.
|
// FileRanges contains a list of file range information for a file.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
|
||||||
type FileRanges struct {
|
type FileRanges struct {
|
||||||
ContentLength uint64
|
ContentLength uint64
|
||||||
LastModified string
|
LastModified string
|
||||||
|
@ -83,7 +79,7 @@ type FileRanges struct {
|
||||||
|
|
||||||
// FileRange contains range information for a file.
|
// FileRange contains range information for a file.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
|
||||||
type FileRange struct {
|
type FileRange struct {
|
||||||
Start uint64 `xml:"Start"`
|
Start uint64 `xml:"Start"`
|
||||||
End uint64 `xml:"End"`
|
End uint64 `xml:"End"`
|
||||||
|
@ -100,9 +96,13 @@ func (f *File) buildPath() string {
|
||||||
|
|
||||||
// ClearRange releases the specified range of space in a file.
|
// ClearRange releases the specified range of space in a file.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
|
||||||
func (f *File) ClearRange(fileRange FileRange) error {
|
func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
|
||||||
headers, err := f.modifyRange(nil, fileRange, nil)
|
var timeout *uint
|
||||||
|
if options != nil {
|
||||||
|
timeout = &options.Timeout
|
||||||
|
}
|
||||||
|
headers, err := f.modifyRange(nil, fileRange, timeout, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -113,24 +113,23 @@ func (f *File) ClearRange(fileRange FileRange) error {
|
||||||
|
|
||||||
// Create creates a new file or replaces an existing one.
|
// Create creates a new file or replaces an existing one.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
|
||||||
func (f *File) Create(maxSize uint64) error {
|
func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
|
||||||
if maxSize > oneTB {
|
if maxSize > oneTB {
|
||||||
return fmt.Errorf("max file size is 1TB")
|
return fmt.Errorf("max file size is 1TB")
|
||||||
}
|
}
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers := headersFromStruct(f.Properties)
|
||||||
|
headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
|
||||||
|
headers["x-ms-type"] = "file"
|
||||||
|
|
||||||
extraHeaders := map[string]string{
|
outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
|
||||||
"x-ms-content-length": strconv.FormatUint(maxSize, 10),
|
|
||||||
"x-ms-type": "file",
|
|
||||||
}
|
|
||||||
|
|
||||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
f.Properties.Length = maxSize
|
f.Properties.Length = maxSize
|
||||||
f.updateEtagAndLastModified(headers)
|
f.updateEtagAndLastModified(outputHeaders)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -142,13 +141,9 @@ func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
|
||||||
"x-ms-type": "file",
|
"x-ms-type": "file",
|
||||||
"x-ms-copy-source": sourceURL,
|
"x-ms-copy-source": sourceURL,
|
||||||
}
|
}
|
||||||
|
params := prepareOptions(options)
|
||||||
|
|
||||||
var parameters url.Values
|
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
|
||||||
if options != nil {
|
|
||||||
parameters = options.getParameters()
|
|
||||||
}
|
|
||||||
|
|
||||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -159,16 +154,16 @@ func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
|
||||||
|
|
||||||
// Delete immediately removes this file from the storage account.
|
// Delete immediately removes this file from the storage account.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
|
||||||
func (f *File) Delete() error {
|
func (f *File) Delete(options *FileRequestOptions) error {
|
||||||
return f.fsc.deleteResource(f.buildPath(), resourceFile)
|
return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteIfExists removes this file if it exists.
|
// DeleteIfExists removes this file if it exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
|
||||||
func (f *File) DeleteIfExists() (bool, error) {
|
func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
|
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||||
|
@ -178,33 +173,59 @@ func (f *File) DeleteIfExists() (bool, error) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetFileOptions includes options for a get file operation
|
||||||
|
type GetFileOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
GetContentMD5 bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadToStream operation downloads the file.
|
||||||
|
//
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
|
||||||
|
func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
|
||||||
|
params := prepareOptions(options)
|
||||||
|
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.body, nil
|
||||||
|
}
|
||||||
|
|
||||||
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
|
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
|
||||||
//
|
//
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
|
||||||
func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) {
|
func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
|
||||||
if getContentMD5 && isRangeTooBig(fileRange) {
|
|
||||||
return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
|
|
||||||
}
|
|
||||||
|
|
||||||
extraHeaders := map[string]string{
|
extraHeaders := map[string]string{
|
||||||
"Range": fileRange.String(),
|
"Range": fileRange.String(),
|
||||||
}
|
}
|
||||||
if getContentMD5 == true {
|
params := url.Values{}
|
||||||
extraHeaders["x-ms-range-get-content-md5"] = "true"
|
if options != nil {
|
||||||
|
if options.GetContentMD5 {
|
||||||
|
if isRangeTooBig(fileRange) {
|
||||||
|
return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
|
||||||
|
}
|
||||||
|
extraHeaders["x-ms-range-get-content-md5"] = "true"
|
||||||
|
}
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders)
|
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fs, err
|
return fs, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
|
||||||
resp.body.Close()
|
readAndCloseBody(resp.body)
|
||||||
return fs, err
|
return fs, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fs.Body = resp.body
|
fs.Body = resp.body
|
||||||
if getContentMD5 {
|
if options != nil && options.GetContentMD5 {
|
||||||
fs.ContentMD5 = resp.headers.Get("Content-MD5")
|
fs.ContentMD5 = resp.headers.Get("Content-MD5")
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
|
@ -221,8 +242,10 @@ func (f *File) Exists() (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchAttributes updates metadata and properties for this file.
|
// FetchAttributes updates metadata and properties for this file.
|
||||||
func (f *File) FetchAttributes() error {
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
|
||||||
headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead)
|
func (f *File) FetchAttributes(options *FileRequestOptions) error {
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -242,17 +265,26 @@ func isRangeTooBig(fileRange FileRange) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ListRangesOptions includes options for a list file ranges operation
|
||||||
|
type ListRangesOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
ListRange *FileRange
|
||||||
|
}
|
||||||
|
|
||||||
// ListRanges returns the list of valid ranges for this file.
|
// ListRanges returns the list of valid ranges for this file.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
|
||||||
func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
|
func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
|
||||||
params := url.Values{"comp": {"rangelist"}}
|
params := url.Values{"comp": {"rangelist"}}
|
||||||
|
|
||||||
// add optional range to list
|
// add optional range to list
|
||||||
var headers map[string]string
|
var headers map[string]string
|
||||||
if listRange != nil {
|
if options != nil {
|
||||||
headers = make(map[string]string)
|
params = addTimeout(params, options.Timeout)
|
||||||
headers["Range"] = listRange.String()
|
if options.ListRange != nil {
|
||||||
|
headers = make(map[string]string)
|
||||||
|
headers["Range"] = options.ListRange.String()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := f.fsc.listContent(f.buildPath(), params, headers)
|
resp, err := f.fsc.listContent(f.buildPath(), params, headers)
|
||||||
|
@ -278,7 +310,7 @@ func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// modifies a range of bytes in this file
|
// modifies a range of bytes in this file
|
||||||
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) {
|
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
|
||||||
if err := f.fsc.checkForStorageEmulator(); err != nil {
|
if err := f.fsc.checkForStorageEmulator(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -289,7 +321,12 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str
|
||||||
return nil, errors.New("range cannot exceed 4MB in size")
|
return nil, errors.New("range cannot exceed 4MB in size")
|
||||||
}
|
}
|
||||||
|
|
||||||
uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}})
|
params := url.Values{"comp": {"range"}}
|
||||||
|
if timeout != nil {
|
||||||
|
params = addTimeout(params, *timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)
|
||||||
|
|
||||||
// default to clear
|
// default to clear
|
||||||
write := "clear"
|
write := "clear"
|
||||||
|
@ -327,9 +364,9 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
|
||||||
func (f *File) SetMetadata() error {
|
func (f *File) SetMetadata(options *FileRequestOptions) error {
|
||||||
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil))
|
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -345,9 +382,9 @@ func (f *File) SetMetadata() error {
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
|
||||||
func (f *File) SetProperties() error {
|
func (f *File) SetProperties(options *FileRequestOptions) error {
|
||||||
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties))
|
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -390,19 +427,32 @@ func (f *File) updateProperties(header http.Header) {
|
||||||
// This method does not create a publicly accessible URL if the file
|
// This method does not create a publicly accessible URL if the file
|
||||||
// is private and this method does not check if the file exists.
|
// is private and this method does not check if the file exists.
|
||||||
func (f *File) URL() string {
|
func (f *File) URL() string {
|
||||||
return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{})
|
return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content.
|
// WriteRangeOptions includes opptions for a write file range operation
|
||||||
// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB.
|
type WriteRangeOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
ContentMD5 string
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
|
||||||
|
// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
|
||||||
|
// a maximum size of 4MB.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
|
||||||
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error {
|
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
|
||||||
if bytes == nil {
|
if bytes == nil {
|
||||||
return errors.New("bytes cannot be nil")
|
return errors.New("bytes cannot be nil")
|
||||||
}
|
}
|
||||||
|
var timeout *uint
|
||||||
|
var md5 *string
|
||||||
|
if options != nil {
|
||||||
|
timeout = &options.Timeout
|
||||||
|
md5 = &options.ContentMD5
|
||||||
|
}
|
||||||
|
|
||||||
headers, err := f.modifyRange(bytes, fileRange, contentMD5)
|
headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
|
@ -5,7 +5,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FileServiceClient contains operations for Microsoft Azure File Service.
|
// FileServiceClient contains operations for Microsoft Azure File Service.
|
||||||
|
@ -17,7 +17,7 @@ type FileServiceClient struct {
|
||||||
// ListSharesParameters defines the set of customizable parameters to make a
|
// ListSharesParameters defines the set of customizable parameters to make a
|
||||||
// List Shares call.
|
// List Shares call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
|
||||||
type ListSharesParameters struct {
|
type ListSharesParameters struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
Marker string
|
Marker string
|
||||||
|
@ -29,7 +29,7 @@ type ListSharesParameters struct {
|
||||||
// ShareListResponse contains the response fields from
|
// ShareListResponse contains the response fields from
|
||||||
// ListShares call.
|
// ListShares call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
|
||||||
type ShareListResponse struct {
|
type ShareListResponse struct {
|
||||||
XMLName xml.Name `xml:"EnumerationResults"`
|
XMLName xml.Name `xml:"EnumerationResults"`
|
||||||
Xmlns string `xml:"xmlns,attr"`
|
Xmlns string `xml:"xmlns,attr"`
|
||||||
|
@ -79,10 +79,10 @@ func (p ListSharesParameters) getParameters() url.Values {
|
||||||
out.Set("include", p.Include)
|
out.Set("include", p.Include)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
}
|
||||||
if p.Timeout != 0 {
|
if p.Timeout != 0 {
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
return out
|
return out
|
||||||
|
@ -91,15 +91,16 @@ func (p ListSharesParameters) getParameters() url.Values {
|
||||||
func (p ListDirsAndFilesParameters) getParameters() url.Values {
|
func (p ListDirsAndFilesParameters) getParameters() url.Values {
|
||||||
out := url.Values{}
|
out := url.Values{}
|
||||||
|
|
||||||
|
if p.Prefix != "" {
|
||||||
|
out.Set("prefix", p.Prefix)
|
||||||
|
}
|
||||||
if p.Marker != "" {
|
if p.Marker != "" {
|
||||||
out.Set("marker", p.Marker)
|
out.Set("marker", p.Marker)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
|
||||||
if p.Timeout != 0 {
|
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
|
||||||
}
|
}
|
||||||
|
out = addTimeout(out, p.Timeout)
|
||||||
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
@ -117,9 +118,9 @@ func getURLInitValues(comp compType, res resourceType) url.Values {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetShareReference returns a Share object for the specified share name.
|
// GetShareReference returns a Share object for the specified share name.
|
||||||
func (f FileServiceClient) GetShareReference(name string) Share {
|
func (f *FileServiceClient) GetShareReference(name string) *Share {
|
||||||
return Share{
|
return &Share{
|
||||||
fsc: &f,
|
fsc: f,
|
||||||
Name: name,
|
Name: name,
|
||||||
Properties: ShareProperties{
|
Properties: ShareProperties{
|
||||||
Quota: -1,
|
Quota: -1,
|
||||||
|
@ -130,7 +131,7 @@ func (f FileServiceClient) GetShareReference(name string) Share {
|
||||||
// ListShares returns the list of shares in a storage account along with
|
// ListShares returns the list of shares in a storage account along with
|
||||||
// pagination token and other response details.
|
// pagination token and other response details.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
|
||||||
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
|
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
|
||||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||||
|
|
||||||
|
@ -231,8 +232,8 @@ func (f FileServiceClient) createResourceNoClose(path string, res resourceType,
|
||||||
}
|
}
|
||||||
|
|
||||||
// returns HTTP header data for the specified directory or share
|
// returns HTTP header data for the specified directory or share
|
||||||
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) {
|
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
|
||||||
resp, err := f.getResourceNoClose(path, comp, res, verb, nil)
|
resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -246,22 +247,21 @@ func (f FileServiceClient) getResourceHeaders(path string, comp compType, res re
|
||||||
}
|
}
|
||||||
|
|
||||||
// gets the specified resource, doesn't close the response body
|
// gets the specified resource, doesn't close the response body
|
||||||
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) {
|
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*storageResponse, error) {
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
if err := f.checkForStorageEmulator(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
params := getURLInitValues(comp, res)
|
params = mergeParams(params, getURLInitValues(comp, res))
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
uri := f.client.getEndpoint(fileServiceName, path, params)
|
||||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
|
||||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||||
|
|
||||||
return f.client.exec(verb, uri, headers, nil, f.auth)
|
return f.client.exec(verb, uri, headers, nil, f.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
// deletes the resource and returns the response
|
// deletes the resource and returns the response
|
||||||
func (f FileServiceClient) deleteResource(path string, res resourceType) error {
|
func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
|
||||||
resp, err := f.deleteResourceNoClose(path, res)
|
resp, err := f.deleteResourceNoClose(path, res, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -270,12 +270,12 @@ func (f FileServiceClient) deleteResource(path string, res resourceType) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// deletes the resource and returns the response, doesn't close the response body
|
// deletes the resource and returns the response, doesn't close the response body
|
||||||
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) {
|
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*storageResponse, error) {
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
if err := f.checkForStorageEmulator(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
values := getURLInitValues(compNone, res)
|
values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, values)
|
uri := f.client.getEndpoint(fileServiceName, path, values)
|
||||||
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
|
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
|
||||||
}
|
}
|
||||||
|
@ -294,21 +294,13 @@ func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[strin
|
||||||
return extraHeaders
|
return extraHeaders
|
||||||
}
|
}
|
||||||
|
|
||||||
// merges extraHeaders into headers and returns headers
|
|
||||||
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
|
|
||||||
for k, v := range extraHeaders {
|
|
||||||
headers[k] = v
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
// sets extra header data for the specified resource
|
// sets extra header data for the specified resource
|
||||||
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) {
|
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
if err := f.checkForStorageEmulator(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
params := getURLInitValues(comp, res)
|
params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
uri := f.client.getEndpoint(fileServiceName, path, params)
|
||||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
||||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||||
|
@ -322,49 +314,6 @@ func (f FileServiceClient) setResourceHeaders(path string, comp compType, res re
|
||||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||||
}
|
}
|
||||||
|
|
||||||
// gets metadata for the specified resource
|
|
||||||
func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return getMetadataFromHeaders(headers), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns a map of custom metadata values from the specified HTTP header
|
|
||||||
func getMetadataFromHeaders(header http.Header) map[string]string {
|
|
||||||
metadata := make(map[string]string)
|
|
||||||
for k, v := range header {
|
|
||||||
// Can't trust CanonicalHeaderKey() to munge case
|
|
||||||
// reliably. "_" is allowed in identifiers:
|
|
||||||
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
|
||||||
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
|
||||||
// http://tools.ietf.org/html/rfc7230#section-3.2
|
|
||||||
// ...but "_" is considered invalid by
|
|
||||||
// CanonicalMIMEHeaderKey in
|
|
||||||
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
|
||||||
// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// metadata["foo"] = content of the last X-Ms-Meta-Foo header
|
|
||||||
k = k[len(userDefinedMetadataHeaderPrefix):]
|
|
||||||
metadata[k] = v[len(v)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(metadata) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
//checkForStorageEmulator determines if the client is setup for use with
|
//checkForStorageEmulator determines if the client is setup for use with
|
||||||
//Azure Storage Emulator, and returns a relevant error
|
//Azure Storage Emulator, and returns a relevant error
|
||||||
func (f FileServiceClient) checkForStorageEmulator() error {
|
func (f FileServiceClient) checkForStorageEmulator() error {
|
|
@ -0,0 +1,187 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lease constants.
|
||||||
|
const (
|
||||||
|
leaseHeaderPrefix = "x-ms-lease-"
|
||||||
|
headerLeaseID = "x-ms-lease-id"
|
||||||
|
leaseAction = "x-ms-lease-action"
|
||||||
|
leaseBreakPeriod = "x-ms-lease-break-period"
|
||||||
|
leaseDuration = "x-ms-lease-duration"
|
||||||
|
leaseProposedID = "x-ms-proposed-lease-id"
|
||||||
|
leaseTime = "x-ms-lease-time"
|
||||||
|
|
||||||
|
acquireLease = "acquire"
|
||||||
|
renewLease = "renew"
|
||||||
|
changeLease = "change"
|
||||||
|
releaseLease = "release"
|
||||||
|
breakLease = "break"
|
||||||
|
)
|
||||||
|
|
||||||
|
// leasePut is common PUT code for the various acquire/release/break etc functions.
|
||||||
|
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
|
||||||
|
params := url.Values{"comp": {"lease"}}
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp.headers, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOptions includes options for all operations regarding leasing blobs
|
||||||
|
type LeaseOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Origin string `header:"Origin"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AcquireLease creates a lease for a blob
|
||||||
|
// returns leaseID acquired
|
||||||
|
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
|
||||||
|
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = acquireLease
|
||||||
|
|
||||||
|
if leaseTimeInSeconds == -1 {
|
||||||
|
// Do nothing, but don't trigger the following clauses.
|
||||||
|
} else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
|
||||||
|
leaseTimeInSeconds = 60
|
||||||
|
} else if leaseTimeInSeconds < 15 {
|
||||||
|
leaseTimeInSeconds = 15
|
||||||
|
}
|
||||||
|
|
||||||
|
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
|
||||||
|
|
||||||
|
if proposedLeaseID != "" {
|
||||||
|
headers[leaseProposedID] = proposedLeaseID
|
||||||
|
}
|
||||||
|
|
||||||
|
respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
||||||
|
|
||||||
|
if returnedLeaseID != "" {
|
||||||
|
return returnedLeaseID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("LeaseID not returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
// BreakLease breaks the lease for a blob
|
||||||
|
// Returns the timeout remaining in the lease in seconds
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = breakLease
|
||||||
|
return b.breakLeaseCommon(headers, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BreakLeaseWithBreakPeriod breaks the lease for a blob
|
||||||
|
// breakPeriodInSeconds is used to determine how long until new lease can be created.
|
||||||
|
// Returns the timeout remaining in the lease in seconds
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = breakLease
|
||||||
|
headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
|
||||||
|
return b.breakLeaseCommon(headers, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// breakLeaseCommon is common code for both version of BreakLease (with and without break period)
|
||||||
|
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
|
||||||
|
|
||||||
|
respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
|
||||||
|
if breakTimeoutStr != "" {
|
||||||
|
breakTimeout, err = strconv.Atoi(breakTimeoutStr)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return breakTimeout, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangeLease changes a lease ID for a blob
|
||||||
|
// Returns the new LeaseID acquired
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = changeLease
|
||||||
|
headers[headerLeaseID] = currentLeaseID
|
||||||
|
headers[leaseProposedID] = proposedLeaseID
|
||||||
|
|
||||||
|
respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
||||||
|
if newLeaseID != "" {
|
||||||
|
return newLeaseID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("LeaseID not returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReleaseLease releases the lease for a blob
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = releaseLease
|
||||||
|
headers[headerLeaseID] = currentLeaseID
|
||||||
|
|
||||||
|
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
|
||||||
|
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = renewLease
|
||||||
|
headers[headerLeaseID] = currentLeaseID
|
||||||
|
|
||||||
|
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,153 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Message represents an Azure message.
|
||||||
|
type Message struct {
|
||||||
|
Queue *Queue
|
||||||
|
Text string `xml:"MessageText"`
|
||||||
|
ID string `xml:"MessageId"`
|
||||||
|
Insertion TimeRFC1123 `xml:"InsertionTime"`
|
||||||
|
Expiration TimeRFC1123 `xml:"ExpirationTime"`
|
||||||
|
PopReceipt string `xml:"PopReceipt"`
|
||||||
|
NextVisible TimeRFC1123 `xml:"TimeNextVisible"`
|
||||||
|
DequeueCount int `xml:"DequeueCount"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) buildPath() string {
|
||||||
|
return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutMessageOptions is the set of options can be specified for Put Messsage
|
||||||
|
// operation. A zero struct does not use any preferences for the request.
|
||||||
|
type PutMessageOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
VisibilityTimeout int
|
||||||
|
MessageTTL int
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put operation adds a new message to the back of the message queue.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
|
||||||
|
func (m *Message) Put(options *PutMessageOptions) error {
|
||||||
|
query := url.Values{}
|
||||||
|
headers := m.Queue.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
req := putMessageRequest{MessageText: m.Text}
|
||||||
|
body, nn, err := xmlMarshal(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers["Content-Length"] = strconv.Itoa(nn)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
if options.VisibilityTimeout != 0 {
|
||||||
|
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
||||||
|
}
|
||||||
|
if options.MessageTTL != 0 {
|
||||||
|
query.Set("messagettl", strconv.Itoa(options.MessageTTL))
|
||||||
|
}
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
|
||||||
|
resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
err = xmlUnmarshal(resp.body, m)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMessageOptions is the set of options can be specified for Update Messsage
|
||||||
|
// operation. A zero struct does not use any preferences for the request.
|
||||||
|
type UpdateMessageOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
VisibilityTimeout int
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update operation updates the specified message. On success the message's
// PopReceipt and NextVisible fields are refreshed from the response headers.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
func (m *Message) Update(options *UpdateMessageOptions) error {
	query := url.Values{}
	// The pop receipt obtained from a prior Get/Update authorizes the update.
	if m.PopReceipt != "" {
		query.Set("popreceipt", m.PopReceipt)
	}

	headers := m.Queue.qsc.client.getStandardHeaders()
	req := putMessageRequest{MessageText: m.Text}
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(nn)

	if options != nil {
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)

	resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	// A successful update issues a fresh pop receipt and next-visible time.
	m.PopReceipt = resp.headers.Get("x-ms-popreceipt")
	nextTimeStr := resp.headers.Get("x-ms-time-next-visible")
	if nextTimeStr != "" {
		nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
		if err != nil {
			return err
		}
		m.NextVisible = TimeRFC1123(nextTime)
	}

	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
|
||||||
|
|
||||||
|
// Delete operation deletes the specified message.
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
||||||
|
func (m *Message) Delete(options *QueueServiceOptions) error {
|
||||||
|
params := url.Values{"popreceipt": {m.PopReceipt}}
|
||||||
|
headers := m.Queue.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
// putMessageRequest is the XML request body for Put Message / Update Message.
type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}
|
|
@ -0,0 +1,33 @@
|
||||||
|
package storage

// MetadataLevel determines if operations should return a payload,
// and its level of detail.
type MetadataLevel string

// These consts are meant to help with OData-supported operations.
const (
	OdataTypeSuffix = "@odata.type"

	// Types

	OdataBinary   = "Edm.Binary"
	OdataDateTime = "Edm.DateTime"
	OdataGUID     = "Edm.Guid"
	OdataInt64    = "Edm.Int64"

	// Query options

	OdataFilter  = "$filter"
	OdataOrderBy = "$orderby"
	OdataTop     = "$top"
	OdataSkip    = "$skip"
	OdataCount   = "$count"
	OdataExpand  = "$expand"
	OdataSelect  = "$select"
	OdataSearch  = "$search"

	// Metadata levels map to the Accept header values understood by the
	// Table service's JSON OData responses.
	EmptyPayload    MetadataLevel = ""
	NoMetadata      MetadataLevel = "application/json;odata=nometadata"
	MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
	FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
)
|
|
@ -0,0 +1,189 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetPageRangesResponse contains the response fields from
// Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type GetPageRangesResponse struct {
	XMLName  xml.Name    `xml:"PageList"`
	PageList []PageRange `xml:"PageRange"`
}
|
||||||
|
|
||||||
|
// PageRange contains information about a page of a page blob from
// Get Pages Range call. Start and End are inclusive byte offsets.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type PageRange struct {
	Start int64 `xml:"Start"`
	End   int64 `xml:"End"`
}
|
||||||
|
|
||||||
|
// Sentinel errors reported while polling an asynchronous blob copy.
var (
	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
)
|
||||||
|
|
||||||
|
// PutPageOptions includes the options for a put page operation.
// Header-tagged fields are copied verbatim onto the request by
// headersFromStruct.
type PutPageOptions struct {
	Timeout                           uint
	LeaseID                           string     `header:"x-ms-lease-id"`
	IfSequenceNumberLessThanOrEqualTo *int       `header:"x-ms-if-sequence-number-le"`
	IfSequenceNumberLessThan          *int       `header:"x-ms-if-sequence-number-lt"`
	IfSequenceNumberEqualTo           *int       `header:"x-ms-if-sequence-number-eq"`
	IfModifiedSince                   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince                 *time.Time `header:"If-Unmodified-Since"`
	IfMatch                           string     `header:"If-Match"`
	IfNoneMatch                       string     `header:"If-None-Match"`
	RequestID                         string     `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// WriteRange writes a range of pages to a page blob.
|
||||||
|
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
||||||
|
// multiplies by 512.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
||||||
|
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
|
||||||
|
if bytes == nil {
|
||||||
|
return errors.New("bytes cannot be nil")
|
||||||
|
}
|
||||||
|
return b.modifyRange(blobRange, bytes, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearRange clears the given range in a page blob.
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
// multiplies by 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
	// A nil reader selects the "clear" page-write mode in modifyRange.
	return b.modifyRange(blobRange, nil, options)
}
|
||||||
|
|
||||||
|
// modifyRange issues a Put Page request that either updates (bytes != nil)
// or clears (bytes == nil) the given 512-byte-aligned range of a page blob.
// It validates the range before touching the network.
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
	if blobRange.End < blobRange.Start {
		return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if blobRange.Start%512 != 0 {
		return errors.New("the value for rangeStart must be a modulus of 512")
	}
	// The end offset is inclusive, so a valid range ends one byte short of a
	// 512-byte boundary (End % 512 == 511).
	if blobRange.End%512 != 511 {
		return errors.New("the value for rangeEnd must be a modulus of 511")
	}

	params := url.Values{"comp": {"page"}}

	// default to clear
	write := "clear"
	var cl uint64

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		cl = (blobRange.End - blobRange.Start) + 1
	}

	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = write
	headers["x-ms-range"] = blobRange.String()
	// Content-Length is 0 for "clear": the request carries no body data.
	headers["Content-Length"] = fmt.Sprintf("%v", cl)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
|
||||||
|
|
||||||
|
// GetPageRangesOptions includes the options for a get page ranges operation.
type GetPageRangesOptions struct {
	Timeout          uint
	Snapshot         *time.Time
	PreviousSnapshot *time.Time
	Range            *BlobRange
	LeaseID          string `header:"x-ms-lease-id"`
	RequestID        string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
	params := url.Values{"comp": {"pagelist"}}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		if options.PreviousSnapshot != nil {
			params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot))
		}
		// Range goes in a header, not the query string.
		if options.Range != nil {
			headers["Range"] = options.Range.String()
		}
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	var out GetPageRangesResponse
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return out, err
	}
	defer resp.body.Close()

	// Verify the status code before attempting to decode the body.
	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return out, err
	}
	err = xmlUnmarshal(resp.body, &out)
	return out, err
}
|
||||||
|
|
||||||
|
// PutPageBlob initializes an empty page blob with specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
	if b.Properties.ContentLength%512 != 0 {
		return errors.New("Content length must be aligned to a 512-byte boundary")
	}

	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	// Declared size and starting sequence number come from the blob's
	// in-memory Properties, not from a request body (Put Blob has none here).
	headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
	headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
|
|
@ -0,0 +1,427 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// casing is per Golang's http.Header canonicalizing the header names.
	approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
)
|
||||||
|
|
||||||
|
// QueueAccessPolicy represents each access policy in the queue ACL.
// The Can* flags map to the "raup" permission letters of the service.
type QueueAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool
	CanAdd     bool
	CanUpdate  bool
	CanProcess bool
}
|
||||||
|
|
||||||
|
// QueuePermissions represents the queue ACLs.
type QueuePermissions struct {
	AccessPolicies []QueueAccessPolicy
}
|
||||||
|
|
||||||
|
// SetQueuePermissionOptions includes options for a set queue permissions
// operation.
type SetQueuePermissionOptions struct {
	Timeout   uint
	RequestID string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// Queue represents an Azure queue.
type Queue struct {
	qsc  *QueueServiceClient
	Name string
	// Metadata holds the queue's user-defined name/value pairs.
	Metadata map[string]string
	// AproxMessageCount is populated by GetMetadata from the
	// X-Ms-Approximate-Messages-Count response header. (Misspelling is
	// part of the exported API and cannot be renamed compatibly.)
	AproxMessageCount uint64
}
|
||||||
|
|
||||||
|
func (q *Queue) buildPath() string {
|
||||||
|
return fmt.Sprintf("/%s", q.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *Queue) buildPathMessages() string {
|
||||||
|
return fmt.Sprintf("%s/messages", q.buildPath())
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueServiceOptions includes options for some queue service operations.
type QueueServiceOptions struct {
	Timeout   uint
	RequestID string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// Create operation creates a queue under the given account. Any metadata
// already set on q is sent along with the request.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
func (q *Queue) Create(options *QueueServiceOptions) error {
	params := url.Values{}
	headers := q.qsc.client.getStandardHeaders()
	headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)

	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
|
||||||
|
|
||||||
|
// Delete operation permanently deletes the specified queue.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
|
||||||
|
func (q *Queue) Delete(options *QueueServiceOptions) error {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
readAndCloseBody(resp.body)
|
||||||
|
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exists returns true if a queue with given name exists. It probes the
// queue's metadata endpoint: 200 means the queue exists, 404 means it
// does not; any other outcome returns the underlying error.
func (q *Queue) Exists() (bool, error) {
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
	resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
	// resp is inspected even when err is non-nil: exec may return both for
	// service-level errors carrying an HTTP status.
	if resp != nil {
		defer readAndCloseBody(resp.body)
		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusOK, nil
		}
	}
	return false, err
}
|
||||||
|
|
||||||
|
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs, replacing any
// existing metadata on the service side.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := q.qsc.client.getStandardHeaders()
	headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)

	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
|
||||||
|
|
||||||
|
// GetMetadata operation retrieves user-defined metadata and queue
|
||||||
|
// properties on the specified queue. Metadata is associated with
|
||||||
|
// the queue as name-values pairs.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
|
||||||
|
//
|
||||||
|
// Because the way Golang's http client (and http.Header in particular)
|
||||||
|
// canonicalize header names, the returned metadata names would always
|
||||||
|
// be all lower case.
|
||||||
|
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
|
||||||
|
params := url.Values{"comp": {"metadata"}}
|
||||||
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
|
||||||
|
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
aproxMessagesStr := resp.headers.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
|
||||||
|
if aproxMessagesStr != "" {
|
||||||
|
aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
q.AproxMessageCount = aproxMessages
|
||||||
|
}
|
||||||
|
|
||||||
|
q.Metadata = getMetadataFromHeaders(resp.headers)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMessageReference returns a message object with the specified text.
|
||||||
|
func (q *Queue) GetMessageReference(text string) *Message {
|
||||||
|
return &Message{
|
||||||
|
Queue: q,
|
||||||
|
Text: text,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMessagesOptions is the set of options can be specified for Get
// Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesOptions struct {
	Timeout           uint
	NumOfMessages     int
	VisibilityTimeout int
	RequestID         string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// messages is the XML envelope returned by Get Messages / Peek Messages.
type messages struct {
	XMLName  xml.Name  `xml:"QueueMessagesList"`
	Messages []Message `xml:"QueueMessage"`
}
|
||||||
|
|
||||||
|
// GetMessages operation retrieves one or more messages from the front of the
// queue. Retrieved messages become invisible for the configured visibility
// timeout.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
	query := url.Values{}
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		if options.NumOfMessages != 0 {
			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
		}
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	defer readAndCloseBody(resp.body)

	var out messages
	err = xmlUnmarshal(resp.body, &out)
	if err != nil {
		return []Message{}, err
	}
	// Bind each returned message back to this queue so Update/Delete work.
	for i := range out.Messages {
		out.Messages[i].Queue = q
	}
	return out.Messages, err
}
|
||||||
|
|
||||||
|
// PeekMessagesOptions is the set of options can be specified for Peek
// Message operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesOptions struct {
	Timeout       uint
	NumOfMessages int
	RequestID     string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
	query := url.Values{"peekonly": {"true"}} // Required for peek operation
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		if options.NumOfMessages != 0 {
			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	defer readAndCloseBody(resp.body)

	var out messages
	err = xmlUnmarshal(resp.body, &out)
	if err != nil {
		return []Message{}, err
	}
	// Bind each returned message back to this queue so Update/Delete work.
	for i := range out.Messages {
		out.Messages[i].Queue = q
	}
	return out.Messages, err
}
|
||||||
|
|
||||||
|
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
	params := url.Values{}
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	// DELETE on the messages collection clears the whole queue.
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)

	resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
|
||||||
|
|
||||||
|
// SetPermissions sets up queue permissions
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
|
||||||
|
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
|
||||||
|
body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"acl"},
|
||||||
|
}
|
||||||
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return errors.New("Unable to set permissions")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateQueueACLpayload converts the access policies into the XML
// SignedIdentifiers body expected by Set Queue ACL, returning the reader
// and its byte length.
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{
		SignedIdentifiers: []SignedIdentifier{},
	}
	for _, qapd := range policies {
		permission := qapd.generateQueuePermissions()
		signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission)
		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
	}
	return xmlMarshal(sil)
}
|
||||||
|
|
||||||
|
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
|
||||||
|
// generate the permissions string (raup).
|
||||||
|
// still want the end user API to have bool flags.
|
||||||
|
permissions = ""
|
||||||
|
|
||||||
|
if qapd.CanRead {
|
||||||
|
permissions += "r"
|
||||||
|
}
|
||||||
|
|
||||||
|
if qapd.CanAdd {
|
||||||
|
permissions += "a"
|
||||||
|
}
|
||||||
|
|
||||||
|
if qapd.CanUpdate {
|
||||||
|
permissions += "u"
|
||||||
|
}
|
||||||
|
|
||||||
|
if qapd.CanProcess {
|
||||||
|
permissions += "p"
|
||||||
|
}
|
||||||
|
|
||||||
|
return permissions
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetQueuePermissionOptions includes options for a get queue permissions
// operation.
type GetQueuePermissionOptions struct {
	Timeout   uint
	RequestID string `header:"x-ms-client-request-id"`
}
|
||||||
|
|
||||||
|
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
// If timeout is 0 then it will not be passed to Azure.
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
	params := url.Values{
		"comp": {"acl"},
	}
	headers := q.qsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	// The XML body contains only the signed-identifier list.
	var ap AccessPolicy
	err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	return buildQueueAccessPolicy(ap, &resp.headers), nil
}
|
||||||
|
|
||||||
|
// buildQueueAccessPolicy converts the generic AccessPolicy XML structures
// into queue-flavored QueuePermissions, decoding the permission string
// back into boolean flags. (headers is currently unused here.)
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
	permissions := QueuePermissions{
		AccessPolicies: []QueueAccessPolicy{},
	}

	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		qapd := QueueAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
		}
		// Inverse of generateQueuePermissions: one letter per capability.
		qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
		qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
		qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
		qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")

		permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
	}
	return &permissions
}
|
|
@ -9,12 +9,20 @@ type QueueServiceClient struct {
|
||||||
|
|
||||||
// GetServiceProperties gets the properties of your storage account's queue service.
|
// GetServiceProperties gets the properties of your storage account's queue service.
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
|
||||||
func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||||
return c.client.getServiceProperties(queueServiceName, c.auth)
|
return q.client.getServiceProperties(queueServiceName, q.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetServiceProperties sets the properties of your storage account's queue service.
|
// SetServiceProperties sets the properties of your storage account's queue service.
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
|
||||||
func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
|
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
|
||||||
return c.client.setServiceProperties(props, queueServiceName, c.auth)
|
return q.client.setServiceProperties(props, queueServiceName, q.auth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetQueueReference returns a Queue object for the specified queue name;
// no request is made to the service.
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
	return &Queue{
		qsc:  q,
		Name: name,
	}
}
|
|
@ -30,9 +30,15 @@ func (s *Share) buildPath() string {
|
||||||
// Create this share under the associated account.
|
// Create this share under the associated account.
|
||||||
// If a share with the same name already exists, the operation fails.
|
// If a share with the same name already exists, the operation fails.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
|
||||||
func (s *Share) Create() error {
|
func (s *Share) Create(options *FileRequestOptions) error {
|
||||||
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated})
|
extraheaders := map[string]string{}
|
||||||
|
if s.Properties.Quota > 0 {
|
||||||
|
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -45,9 +51,15 @@ func (s *Share) Create() error {
|
||||||
// it does not exist. Returns true if the share is newly created or false if
|
// it does not exist. Returns true if the share is newly created or false if
|
||||||
// the share already exists.
|
// the share already exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
|
||||||
func (s *Share) CreateIfNotExists() (bool, error) {
|
func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil)
|
extraheaders := map[string]string{}
|
||||||
|
if s.Properties.Quota > 0 {
|
||||||
|
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := prepareOptions(options)
|
||||||
|
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||||
|
@ -55,7 +67,7 @@ func (s *Share) CreateIfNotExists() (bool, error) {
|
||||||
s.updateEtagAndLastModified(resp.headers)
|
s.updateEtagAndLastModified(resp.headers)
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
return false, s.FetchAttributes()
|
return false, s.FetchAttributes(nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -66,16 +78,16 @@ func (s *Share) CreateIfNotExists() (bool, error) {
|
||||||
// and directories contained within it are later deleted during garbage
|
// and directories contained within it are later deleted during garbage
|
||||||
// collection. If the share does not exist the operation fails
|
// collection. If the share does not exist the operation fails
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
|
||||||
func (s *Share) Delete() error {
|
func (s *Share) Delete(options *FileRequestOptions) error {
|
||||||
return s.fsc.deleteResource(s.buildPath(), resourceShare)
|
return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteIfExists operation marks this share for deletion if it exists.
|
// DeleteIfExists operation marks this share for deletion if it exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
|
||||||
func (s *Share) DeleteIfExists() (bool, error) {
|
func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
|
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer readAndCloseBody(resp.body)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||||
|
@ -97,8 +109,10 @@ func (s *Share) Exists() (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchAttributes retrieves metadata and properties for this share.
|
// FetchAttributes retrieves metadata and properties for this share.
|
||||||
func (s *Share) FetchAttributes() error {
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
|
||||||
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead)
|
func (s *Share) FetchAttributes(options *FileRequestOptions) error {
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -130,9 +144,9 @@ func (s *Share) ServiceClient() *FileServiceClient {
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
|
||||||
func (s *Share) SetMetadata() error {
|
func (s *Share) SetMetadata(options *FileRequestOptions) error {
|
||||||
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
|
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -148,15 +162,17 @@ func (s *Share) SetMetadata() error {
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
|
||||||
func (s *Share) SetProperties() error {
|
func (s *Share) SetProperties(options *FileRequestOptions) error {
|
||||||
if s.Properties.Quota < 1 || s.Properties.Quota > 5120 {
|
extraheaders := map[string]string{}
|
||||||
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
|
if s.Properties.Quota > 0 {
|
||||||
|
if s.Properties.Quota > 5120 {
|
||||||
|
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
|
||||||
|
}
|
||||||
|
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
|
||||||
}
|
}
|
||||||
|
|
||||||
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{
|
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options)
|
||||||
"x-ms-share-quota": strconv.Itoa(s.Properties.Quota),
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
|
@ -1,9 +1,9 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ServiceProperties represents the storage account service properties
|
// ServiceProperties represents the storage account service properties
|
||||||
|
@ -106,13 +106,12 @@ func (c Client) setServiceProperties(props ServiceProperties, service string, au
|
||||||
}
|
}
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
headers := c.getStandardHeaders()
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
|
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
readAndCloseBody(resp.body)
|
||||||
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||||
}
|
}
|
|
@ -0,0 +1,412 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
tablesURIPath = "/Tables"
|
||||||
|
nextTableQueryParameter = "NextTableName"
|
||||||
|
headerNextPartitionKey = "x-ms-continuation-NextPartitionKey"
|
||||||
|
headerNextRowKey = "x-ms-continuation-NextRowKey"
|
||||||
|
nextPartitionKeyQueryParameter = "NextPartitionKey"
|
||||||
|
nextRowKeyQueryParameter = "NextRowKey"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TableAccessPolicy are used for SETTING table policies
|
||||||
|
type TableAccessPolicy struct {
|
||||||
|
ID string
|
||||||
|
StartTime time.Time
|
||||||
|
ExpiryTime time.Time
|
||||||
|
CanRead bool
|
||||||
|
CanAppend bool
|
||||||
|
CanUpdate bool
|
||||||
|
CanDelete bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Table represents an Azure table.
|
||||||
|
type Table struct {
|
||||||
|
tsc *TableServiceClient
|
||||||
|
Name string `json:"TableName"`
|
||||||
|
OdataEditLink string `json:"odata.editLink"`
|
||||||
|
OdataID string `json:"odata.id"`
|
||||||
|
OdataMetadata string `json:"odata.metadata"`
|
||||||
|
OdataType string `json:"odata.type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// EntityQueryResult contains the response from
|
||||||
|
// ExecuteQuery and ExecuteQueryNextResults functions.
|
||||||
|
type EntityQueryResult struct {
|
||||||
|
OdataMetadata string `json:"odata.metadata"`
|
||||||
|
Entities []*Entity `json:"value"`
|
||||||
|
QueryNextLink
|
||||||
|
table *Table
|
||||||
|
}
|
||||||
|
|
||||||
|
type continuationToken struct {
|
||||||
|
NextPartitionKey string
|
||||||
|
NextRowKey string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Table) buildPath() string {
|
||||||
|
return fmt.Sprintf("/%s", t.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Table) buildSpecificPath() string {
|
||||||
|
return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the referenced table.
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
|
||||||
|
func (t *Table) Get(timeout uint, ml MetadataLevel) error {
|
||||||
|
if ml == EmptyPayload {
|
||||||
|
return errEmptyPayload
|
||||||
|
}
|
||||||
|
|
||||||
|
query := url.Values{
|
||||||
|
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
|
||||||
|
}
|
||||||
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers[headerAccept] = string(ml)
|
||||||
|
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
respBody, err := ioutil.ReadAll(resp.body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(respBody, t)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates the referenced table.
|
||||||
|
// This function fails if the name is not compliant
|
||||||
|
// with the specification or the tables already exists.
|
||||||
|
// ml determines the level of detail of metadata in the operation response,
|
||||||
|
// or no data at all.
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
|
||||||
|
func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
|
||||||
|
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
|
||||||
|
})
|
||||||
|
|
||||||
|
type createTableRequest struct {
|
||||||
|
TableName string `json:"TableName"`
|
||||||
|
}
|
||||||
|
req := createTableRequest{TableName: t.Name}
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
if err := json.NewEncoder(buf).Encode(req); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = addReturnContentHeaders(headers, ml)
|
||||||
|
headers = addBodyRelatedHeaders(headers, buf.Len())
|
||||||
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if ml == EmptyPayload {
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml != EmptyPayload {
|
||||||
|
data, err := ioutil.ReadAll(resp.body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(data, t)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes the referenced table.
|
||||||
|
// This function fails if the table is not present.
|
||||||
|
// Be advised: Delete deletes all the entries that may be present.
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
|
||||||
|
func (t *Table) Delete(timeout uint, options *TableOptions) error {
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
|
||||||
|
"timeout": {strconv.Itoa(int(timeout))},
|
||||||
|
})
|
||||||
|
|
||||||
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = addReturnContentHeaders(headers, EmptyPayload)
|
||||||
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryOptions includes options for a query entities operation.
|
||||||
|
// Top, filter and select are OData query options.
|
||||||
|
type QueryOptions struct {
|
||||||
|
Top uint
|
||||||
|
Filter string
|
||||||
|
Select []string
|
||||||
|
RequestID string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
|
||||||
|
query := url.Values{}
|
||||||
|
headers := map[string]string{}
|
||||||
|
if options != nil {
|
||||||
|
if options.Top > 0 {
|
||||||
|
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
|
||||||
|
}
|
||||||
|
if options.Filter != "" {
|
||||||
|
query.Add(OdataFilter, options.Filter)
|
||||||
|
}
|
||||||
|
if len(options.Select) > 0 {
|
||||||
|
query.Add(OdataSelect, strings.Join(options.Select, ","))
|
||||||
|
}
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||||
|
}
|
||||||
|
return query, headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryEntities returns the entities in the table.
|
||||||
|
// You can use query options defined by the OData Protocol specification.
|
||||||
|
//
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
|
||||||
|
func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
|
||||||
|
if ml == EmptyPayload {
|
||||||
|
return nil, errEmptyPayload
|
||||||
|
}
|
||||||
|
query, headers := options.getParameters()
|
||||||
|
query = addTimeout(query, timeout)
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
|
||||||
|
return t.queryEntities(uri, headers, ml)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextResults returns the next page of results
|
||||||
|
// from a QueryEntities or NextResults operation.
|
||||||
|
//
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
|
||||||
|
func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
|
||||||
|
if eqr == nil {
|
||||||
|
return nil, errNilPreviousResult
|
||||||
|
}
|
||||||
|
if eqr.NextLink == nil {
|
||||||
|
return nil, errNilNextLink
|
||||||
|
}
|
||||||
|
headers := options.addToHeaders(map[string]string{})
|
||||||
|
return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPermissions sets up table ACL permissions
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
|
||||||
|
func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
|
||||||
|
params := url.Values{"comp": {"acl"},
|
||||||
|
"timeout": {strconv.Itoa(int(timeout))},
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
|
||||||
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
|
body, length, err := generateTableACLPayload(tap)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readAndCloseBody(resp.body)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
|
||||||
|
sil := SignedIdentifiers{
|
||||||
|
SignedIdentifiers: []SignedIdentifier{},
|
||||||
|
}
|
||||||
|
for _, tap := range policies {
|
||||||
|
permission := generateTablePermissions(&tap)
|
||||||
|
signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
|
||||||
|
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||||
|
}
|
||||||
|
return xmlMarshal(sil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPermissions gets the table ACL permissions
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
|
||||||
|
func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) {
|
||||||
|
params := url.Values{"comp": {"acl"},
|
||||||
|
"timeout": {strconv.Itoa(int(timeout))},
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
|
||||||
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.body.Close()
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ap AccessPolicy
|
||||||
|
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return updateTableAccessPolicy(ap), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
|
||||||
|
headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
|
||||||
|
if ml != EmptyPayload {
|
||||||
|
headers[headerAccept] = string(ml)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.body.Close()
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := ioutil.ReadAll(resp.body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var entities EntityQueryResult
|
||||||
|
err = json.Unmarshal(data, &entities)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range entities.Entities {
|
||||||
|
entities.Entities[i].Table = t
|
||||||
|
}
|
||||||
|
entities.table = t
|
||||||
|
|
||||||
|
contToken := extractContinuationTokenFromHeaders(resp.headers)
|
||||||
|
if contToken == nil {
|
||||||
|
entities.NextLink = nil
|
||||||
|
} else {
|
||||||
|
originalURI, err := url.Parse(uri)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
v := originalURI.Query()
|
||||||
|
v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
|
||||||
|
v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
|
||||||
|
newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
|
||||||
|
entities.NextLink = &newURI
|
||||||
|
entities.ml = ml
|
||||||
|
}
|
||||||
|
|
||||||
|
return &entities, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractContinuationTokenFromHeaders(h http.Header) *continuationToken {
|
||||||
|
ct := continuationToken{
|
||||||
|
NextPartitionKey: h.Get(headerNextPartitionKey),
|
||||||
|
NextRowKey: h.Get(headerNextRowKey),
|
||||||
|
}
|
||||||
|
|
||||||
|
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
|
||||||
|
return &ct
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
||||||
|
taps := []TableAccessPolicy{}
|
||||||
|
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||||
|
tap := TableAccessPolicy{
|
||||||
|
ID: policy.ID,
|
||||||
|
StartTime: policy.AccessPolicy.StartTime,
|
||||||
|
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||||
|
}
|
||||||
|
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||||
|
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
|
||||||
|
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
||||||
|
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||||
|
|
||||||
|
taps = append(taps, tap)
|
||||||
|
}
|
||||||
|
return taps
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
|
||||||
|
// generate the permissions string (raud).
|
||||||
|
// still want the end user API to have bool flags.
|
||||||
|
permissions = ""
|
||||||
|
|
||||||
|
if tap.CanRead {
|
||||||
|
permissions += "r"
|
||||||
|
}
|
||||||
|
|
||||||
|
if tap.CanAppend {
|
||||||
|
permissions += "a"
|
||||||
|
}
|
||||||
|
|
||||||
|
if tap.CanUpdate {
|
||||||
|
permissions += "u"
|
||||||
|
}
|
||||||
|
|
||||||
|
if tap.CanDelete {
|
||||||
|
permissions += "d"
|
||||||
|
}
|
||||||
|
return permissions
|
||||||
|
}
|
|
@ -0,0 +1,302 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/satori/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Operation type. Insert, Delete, Replace etc.
|
||||||
|
type Operation int
|
||||||
|
|
||||||
|
// consts for batch operations.
|
||||||
|
const (
|
||||||
|
InsertOp = Operation(1)
|
||||||
|
DeleteOp = Operation(2)
|
||||||
|
ReplaceOp = Operation(3)
|
||||||
|
MergeOp = Operation(4)
|
||||||
|
InsertOrReplaceOp = Operation(5)
|
||||||
|
InsertOrMergeOp = Operation(6)
|
||||||
|
)
|
||||||
|
|
||||||
|
// BatchEntity used for tracking Entities to operate on and
|
||||||
|
// whether operations (replace/merge etc) should be forced.
|
||||||
|
// Wrapper for regular Entity with additional data specific for the entity.
|
||||||
|
type BatchEntity struct {
|
||||||
|
*Entity
|
||||||
|
Force bool
|
||||||
|
Op Operation
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableBatch stores all the entities that will be operated on during a batch process.
|
||||||
|
// Entities can be inserted, replaced or deleted.
|
||||||
|
type TableBatch struct {
|
||||||
|
BatchEntitySlice []BatchEntity
|
||||||
|
|
||||||
|
// reference to table we're operating on.
|
||||||
|
Table *Table
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultChangesetHeaders for changeSets
|
||||||
|
var defaultChangesetHeaders = map[string]string{
|
||||||
|
"Accept": "application/json;odata=minimalmetadata",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Prefer": "return-no-content",
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBatch return new TableBatch for populating.
|
||||||
|
func (t *Table) NewBatch() *TableBatch {
|
||||||
|
return &TableBatch{
|
||||||
|
Table: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertEntity adds an entity in preparation for a batch insert.
|
||||||
|
func (t *TableBatch) InsertEntity(entity *Entity) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
|
||||||
|
func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
|
||||||
|
func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
|
||||||
|
t.InsertOrReplaceEntity(entity, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
|
||||||
|
func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
|
||||||
|
func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
|
||||||
|
t.InsertOrMergeEntity(entity, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceEntity adds an entity in preparation for a batch replace.
|
||||||
|
func (t *TableBatch) ReplaceEntity(entity *Entity) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteEntity adds an entity in preparation for a batch delete
|
||||||
|
func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
|
||||||
|
func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
|
||||||
|
t.DeleteEntity(entity, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeEntity adds an entity in preparation for a batch merge
|
||||||
|
func (t *TableBatch) MergeEntity(entity *Entity) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecuteBatch executes many table operations in one request to Azure.
|
||||||
|
// The operations can be combinations of Insert, Delete, Replace and Merge
|
||||||
|
// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
|
||||||
|
// the changesets.
|
||||||
|
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
|
||||||
|
func (t *TableBatch) ExecuteBatch() error {
|
||||||
|
changesetBoundary := fmt.Sprintf("changeset_%s", uuid.NewV1())
|
||||||
|
uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
|
||||||
|
changesetBody, err := t.generateChangesetBody(changesetBoundary)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
boundary := fmt.Sprintf("batch_%s", uuid.NewV1())
|
||||||
|
body, err := generateBody(changesetBody, changesetBoundary, boundary)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := t.Table.tsc.client.getStandardHeaders()
|
||||||
|
headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
|
||||||
|
|
||||||
|
resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.body.Close()
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil {
|
||||||
|
|
||||||
|
// check which batch failed.
|
||||||
|
operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
|
||||||
|
requestID, date, version := getDebugHeaders(resp.headers)
|
||||||
|
return AzureStorageServiceError{
|
||||||
|
StatusCode: resp.statusCode,
|
||||||
|
Code: resp.odata.Err.Code,
|
||||||
|
RequestID: requestID,
|
||||||
|
Date: date,
|
||||||
|
APIVersion: version,
|
||||||
|
Message: operationFailedMessage,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFailedOperation parses the original Azure error string and determines which operation failed
|
||||||
|
// and generates appropriate message.
|
||||||
|
func (t *TableBatch) getFailedOperation(errorMessage string) string {
|
||||||
|
// errorMessage consists of "number:string" we just need the number.
|
||||||
|
sp := strings.Split(errorMessage, ":")
|
||||||
|
if len(sp) > 1 {
|
||||||
|
msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
|
||||||
|
return msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// cant parse the message, just return the original message to client
|
||||||
|
return errorMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateBody generates the complete body for the batch request.
|
||||||
|
func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
|
||||||
|
|
||||||
|
body := new(bytes.Buffer)
|
||||||
|
writer := multipart.NewWriter(body)
|
||||||
|
writer.SetBoundary(boundary)
|
||||||
|
h := make(textproto.MIMEHeader)
|
||||||
|
h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
|
||||||
|
batchWriter, err := writer.CreatePart(h)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
batchWriter.Write(changeSetBody.Bytes())
|
||||||
|
writer.Close()
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateChangesetBody generates the individual changesets for the various operations within the batch request.
|
||||||
|
// There is a changeset for Insert, Delete, Merge etc.
|
||||||
|
func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
|
||||||
|
|
||||||
|
body := new(bytes.Buffer)
|
||||||
|
writer := multipart.NewWriter(body)
|
||||||
|
writer.SetBoundary(changesetBoundary)
|
||||||
|
|
||||||
|
for _, be := range t.BatchEntitySlice {
|
||||||
|
t.generateEntitySubset(&be, writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
writer.Close()
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateVerb generates the HTTP request VERB required for each changeset.
// "MERGE" is a Table-service-specific verb with no net/http constant.
func generateVerb(op Operation) (string, error) {
	switch op {
	case InsertOp:
		return http.MethodPost, nil
	case DeleteOp:
		return http.MethodDelete, nil
	case ReplaceOp, InsertOrReplaceOp:
		return http.MethodPut, nil
	case MergeOp, InsertOrMergeOp:
		return "MERGE", nil
	default:
		return "", errors.New("Unable to detect operation")
	}
}
|
||||||
|
|
||||||
|
// generateQueryPath generates the query path for within the changesets
|
||||||
|
// For inserts it will just be a table query path (table name)
|
||||||
|
// but for other operations (modifying an existing entity) then
|
||||||
|
// the partition/row keys need to be generated.
|
||||||
|
func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
|
||||||
|
if op == InsertOp {
|
||||||
|
return entity.Table.buildPath()
|
||||||
|
}
|
||||||
|
return entity.buildPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateGenericOperationHeaders generates common headers for a given operation.
|
||||||
|
func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
|
||||||
|
retval := map[string]string{}
|
||||||
|
|
||||||
|
for k, v := range defaultChangesetHeaders {
|
||||||
|
retval[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
|
||||||
|
if be.Force || be.Entity.OdataEtag == "" {
|
||||||
|
retval["If-Match"] = "*"
|
||||||
|
} else {
|
||||||
|
retval["If-Match"] = be.Entity.OdataEtag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return retval
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateEntitySubset generates body payload for particular batch entity.
// Each changeset part is itself a serialized HTTP request: a request line,
// headers, a blank line, then (except for deletes) a JSON entity body.
func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {

	h := make(textproto.MIMEHeader)
	h.Set(headerContentType, "application/http")
	h.Set(headerContentTransferEncoding, "binary")

	verb, err := generateVerb(batchEntity.Op)
	if err != nil {
		return err
	}

	genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
	queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)

	operationWriter, err := writer.CreatePart(h)
	if err != nil {
		return err
	}

	// Write the embedded request line, e.g. "POST <uri> HTTP/1.1".
	urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
	operationWriter.Write([]byte(urlAndVerb))
	writeHeaders(genericOpHeadersMap, &operationWriter)
	operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.

	// delete operation doesn't need a body.
	if batchEntity.Op != DeleteOp {
		//var e Entity = batchEntity.Entity
		body, err := json.Marshal(batchEntity.Entity)
		if err != nil {
			return err
		}
		operationWriter.Write(body)
	}

	return nil
}
|
||||||
|
|
||||||
|
func writeHeaders(h map[string]string, writer *io.Writer) {
|
||||||
|
// This way it is guaranteed the headers will be written in a sorted order
|
||||||
|
var keys []string
|
||||||
|
for k := range h {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
(*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
|
||||||
|
}
|
||||||
|
}
|
190
vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
generated
vendored
Normal file
190
vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
generated
vendored
Normal file
|
@ -0,0 +1,190 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Header names used when talking to the Table service REST API.
const (
	headerAccept = "Accept"
	headerEtag   = "Etag"
	headerPrefer = "Prefer"
	// Continuation header returned by the service when a query result is
	// paginated; consumed by queryTables to build NextLink.
	headerXmsContinuation = "x-ms-Continuation-NextTableName"
)
|
||||||
|
|
||||||
|
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client         // underlying storage client (endpoint/header construction)
	auth   authentication // authentication scheme passed to request execution
}
|
||||||
|
|
||||||
|
// TableOptions includes options for some table operations
type TableOptions struct {
	// RequestID, when non-empty, is sent as the x-ms-client-request-id
	// header for request tracing.
	RequestID string
}
|
||||||
|
|
||||||
|
func (options *TableOptions) addToHeaders(h map[string]string) map[string]string {
|
||||||
|
if options != nil {
|
||||||
|
h = addToHeaders(h, "x-ms-client-request-id", options.RequestID)
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryNextLink includes information for getting the next page of
// results in query operations
type QueryNextLink struct {
	NextLink *string       // URI of the next page; nil when there are no more results
	ml       MetadataLevel // metadata level to reuse when fetching the next page
}
|
||||||
|
|
||||||
|
// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	// Delegates to the shared client implementation for this service.
	return t.client.getServiceProperties(tableServiceName, t.auth)
}

// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	// Delegates to the shared client implementation for this service.
	return t.client.setServiceProperties(props, tableServiceName, t.auth)
}
|
||||||
|
|
||||||
|
// GetTableReference returns a Table object for the specified table name.
|
||||||
|
func (t *TableServiceClient) GetTableReference(name string) *Table {
|
||||||
|
return &Table{
|
||||||
|
tsc: t,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryTablesOptions includes options for some table operations
type QueryTablesOptions struct {
	Top       uint   // maximum number of tables to return ($top); 0 omits the parameter
	Filter    string // OData $filter expression; empty omits the parameter
	RequestID string // sent as x-ms-client-request-id when non-empty
}
|
||||||
|
|
||||||
|
func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) {
|
||||||
|
query := url.Values{}
|
||||||
|
headers := map[string]string{}
|
||||||
|
if options != nil {
|
||||||
|
if options.Top > 0 {
|
||||||
|
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
|
||||||
|
}
|
||||||
|
if options.Filter != "" {
|
||||||
|
query.Add(OdataFilter, options.Filter)
|
||||||
|
}
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||||
|
}
|
||||||
|
return query, headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryTables returns the tables in the storage account.
// You can use query options defined by the OData Protocol specification.
// ml must not be EmptyPayload (validated in queryTables).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
	query, headers := options.getParameters()
	uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
	return t.queryTables(uri, headers, ml)
}
|
||||||
|
|
||||||
|
// NextResults returns the next page of results
// from a QueryTables or a NextResults operation.
// It fails with errNilNextLink when the previous result was the last page.
//
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
	if tqr == nil {
		return nil, errNilPreviousResult
	}
	if tqr.NextLink == nil {
		return nil, errNilNextLink
	}
	headers := options.addToHeaders(map[string]string{})

	// Reuse the metadata level (tqr.ml) recorded by the original query.
	return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
}
|
||||||
|
|
||||||
|
// TableQueryResult contains the response from
// QueryTables and QueryTablesNextResults functions.
type TableQueryResult struct {
	OdataMetadata string  `json:"odata.metadata"` // OData metadata URI of the result set
	Tables        []Table `json:"value"`          // tables returned by this page of the query
	QueryNextLink         // pagination state; NextLink is nil on the last page
	tsc *TableServiceClient // back-reference set by queryTables, used by NextResults
}
|
||||||
|
|
||||||
|
// queryTables executes a table query against uri and decodes the JSON
// response into a TableQueryResult. ml selects the OData metadata level and
// must not be EmptyPayload. When the service returns a continuation header,
// out.NextLink is set to the URI of the next page (see NextResults).
func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
	if ml == EmptyPayload {
		return nil, errEmptyPayload
	}
	headers = mergeHeaders(headers, t.client.getStandardHeaders())
	headers[headerAccept] = string(ml)

	resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	respBody, err := ioutil.ReadAll(resp.body)
	if err != nil {
		return nil, err
	}
	var out TableQueryResult
	err = json.Unmarshal(respBody, &out)
	if err != nil {
		return nil, err
	}

	// Wire each returned table (and the result itself) back to this client so
	// follow-up operations can be issued on them.
	for i := range out.Tables {
		out.Tables[i].tsc = t
	}
	out.tsc = t

	nextLink := resp.headers.Get(http.CanonicalHeaderKey(headerXmsContinuation))
	if nextLink == "" {
		out.NextLink = nil
	} else {
		// Rebuild the original query URI with the continuation token added so
		// the next page repeats the same query.
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		v.Set(nextTableQueryParameter, nextLink)
		newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
		out.NextLink = &newURI
		out.ml = ml
	}

	return &out, nil
}
|
||||||
|
|
||||||
|
func addBodyRelatedHeaders(h map[string]string, length int) map[string]string {
|
||||||
|
h[headerContentType] = "application/json"
|
||||||
|
h[headerContentLength] = fmt.Sprintf("%v", length)
|
||||||
|
h[headerAcceptCharset] = "UTF-8"
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string {
|
||||||
|
if ml != EmptyPayload {
|
||||||
|
h[headerPrefer] = "return-content"
|
||||||
|
h[headerAccept] = string(ml)
|
||||||
|
} else {
|
||||||
|
h[headerPrefer] = "return-no-content"
|
||||||
|
// From API version 2015-12-11 onwards, Accept header is required
|
||||||
|
h[headerAccept] = string(NoMetadata)
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
|
@ -0,0 +1,199 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// fixedTime is an arbitrary time.Time value used only for
	// reflect.TypeOf comparisons in headersFromStruct.
	fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
)
|
||||||
|
|
||||||
|
// computeHmac256 returns the base64-encoded HMAC-SHA256 digest of message,
// keyed with the client's account key; used for SharedKey request signing.
func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
|
||||||
|
|
||||||
|
// currentTimeRfc1123Formatted returns the current UTC time formatted with
// http.TimeFormat (RFC 1123 with the literal "GMT" zone name).
func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}

// timeRfc1123Formatted formats t using http.TimeFormat, the layout required
// for HTTP date headers. Note this is NOT time.RFC1123, which would emit the
// zone abbreviation instead of "GMT".
func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}
|
||||||
|
|
||||||
|
func mergeParams(v1, v2 url.Values) url.Values {
|
||||||
|
out := url.Values{}
|
||||||
|
for k, v := range v1 {
|
||||||
|
out[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range v2 {
|
||||||
|
vals, ok := out[k]
|
||||||
|
if ok {
|
||||||
|
vals = append(vals, v...)
|
||||||
|
out[k] = vals
|
||||||
|
} else {
|
||||||
|
out[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareBlockListRequest(blocks []Block) string {
|
||||||
|
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
|
||||||
|
for _, v := range blocks {
|
||||||
|
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
|
||||||
|
}
|
||||||
|
s += `</BlockList>`
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func xmlUnmarshal(body io.Reader, v interface{}) error {
|
||||||
|
data, err := ioutil.ReadAll(body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return xml.Unmarshal(data, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func xmlMarshal(v interface{}) (io.Reader, int, error) {
|
||||||
|
b, err := xml.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
return bytes.NewReader(b), len(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// headersFromStruct builds a header map from the fields of v that carry a
// `header` struct tag. Nil pointer fields and empty values are skipped.
// NOTE(review): v is assumed to be a struct value (not a pointer to one);
// anything else would panic in NumField — confirm against callers.
func headersFromStruct(v interface{}) map[string]string {
	headers := make(map[string]string)
	value := reflect.ValueOf(v)
	for i := 0; i < value.NumField(); i++ {
		key := value.Type().Field(i).Tag.Get("header")
		if key != "" {
			// Indirect dereferences pointer fields; a nil pointer yields an
			// invalid Value, leaving val empty below.
			reflectedValue := reflect.Indirect(value.Field(i))
			var val string
			if reflectedValue.IsValid() {
				switch reflectedValue.Type() {
				case reflect.TypeOf(fixedTime):
					// time.Time fields are rendered with http.TimeFormat.
					val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
				case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
					val = strconv.FormatUint(reflectedValue.Uint(), 10)
				case reflect.TypeOf(int(0)):
					val = strconv.FormatInt(reflectedValue.Int(), 10)
				default:
					val = reflectedValue.String()
				}
			}
			if val != "" {
				headers[key] = val
			}
		}
	}
	return headers
}
|
||||||
|
|
||||||
|
// mergeHeaders merges extraHeaders into headers (in place) and returns
// headers; on key collision the extraHeaders value wins.
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
	for key, value := range extraHeaders {
		headers[key] = value
	}
	return headers
}
|
||||||
|
|
||||||
|
// addToHeaders sets h[key] = value when value is non-empty and returns h.
func addToHeaders(h map[string]string, key, value string) map[string]string {
	if value == "" {
		return h
	}
	h[key] = value
	return h
}
|
||||||
|
|
||||||
|
func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
|
||||||
|
if value != nil {
|
||||||
|
h = addToHeaders(h, key, timeRfc1123Formatted(*value))
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func addTimeout(params url.Values, timeout uint) url.Values {
|
||||||
|
if timeout > 0 {
|
||||||
|
params.Add("timeout", fmt.Sprintf("%v", timeout))
|
||||||
|
}
|
||||||
|
return params
|
||||||
|
}
|
||||||
|
|
||||||
|
func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
|
||||||
|
if snapshot != nil {
|
||||||
|
params.Add("snapshot", timeRfc1123Formatted(*snapshot))
|
||||||
|
}
|
||||||
|
return params
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) {
|
||||||
|
var out time.Time
|
||||||
|
var err error
|
||||||
|
outStr := h.Get(key)
|
||||||
|
if outStr != "" {
|
||||||
|
out, err = time.Parse(time.RFC1123, outStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
|
||||||
|
type TimeRFC1123 time.Time
|
||||||
|
|
||||||
|
// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
|
||||||
|
func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||||
|
var value string
|
||||||
|
d.DecodeElement(&value, &start)
|
||||||
|
parse, err := time.Parse(time.RFC1123, value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = TimeRFC1123(parse)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMetadataFromHeaders returns a map of custom metadata values from the
// specified HTTP header (headers prefixed with x-ms-meta-). Keys are returned
// lower-cased with the prefix stripped; nil is returned when no metadata
// headers are present.
func getMetadataFromHeaders(header http.Header) map[string]string {
	metadata := make(map[string]string)
	for k, v := range header {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
			continue
		}
		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
		k = k[len(userDefinedMetadataHeaderPrefix):]
		metadata[k] = v[len(v)-1]
	}

	if len(metadata) == 0 {
		return nil
	}

	return metadata
}
|
|
@ -1,5 +1,5 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
var (
|
var (
|
||||||
sdkVersion = "0.1.0"
|
sdkVersion = "10.0.2"
|
||||||
)
|
)
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE
|
|
|
@ -1,10 +0,0 @@
|
||||||
# Azure Storage SDK for Go
|
|
||||||
[![GoDoc](https://godoc.org/github.com/Azure/azure-storage-go?status.svg)](https://godoc.org/github.com/Azure/azure-storage-go) [![Build Status](https://travis-ci.org/Azure/azure-storage-go.svg?branch=master)](https://travis-ci.org/Azure/azure-storage-go) [![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-storage-go)](https://goreportcard.com/report/github.com/Azure/azure-storage-go)
|
|
||||||
|
|
||||||
The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.
|
|
||||||
|
|
||||||
This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
|
|
||||||
|
|
||||||
# Contributing
|
|
||||||
|
|
||||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,14 +0,0 @@
|
||||||
hash: a97c0c90fe4d23bbd8e5745431f633e75530bb611131b786d76b8e1763bce85e
|
|
||||||
updated: 2017-02-23T09:58:57.3701584-08:00
|
|
||||||
imports:
|
|
||||||
- name: github.com/Azure/go-autorest
|
|
||||||
version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356
|
|
||||||
subpackages:
|
|
||||||
- autorest/azure
|
|
||||||
- autorest
|
|
||||||
- autorest/date
|
|
||||||
- name: github.com/dgrijalva/jwt-go
|
|
||||||
version: 2268707a8f0843315e2004ee4f1d021dc08baedf
|
|
||||||
testImports:
|
|
||||||
- name: gopkg.in/check.v1
|
|
||||||
version: 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
|
|
|
@ -1,4 +0,0 @@
|
||||||
package: github.com/Azure/azure-sdk-for-go-storage
|
|
||||||
import: []
|
|
||||||
testImport:
|
|
||||||
- package: gopkg.in/check.v1
|
|
|
@ -1,339 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// casing is per Golang's http.Header canonicalizing the header names.
|
|
||||||
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
|
|
||||||
userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
|
|
||||||
)
|
|
||||||
|
|
||||||
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
|
|
||||||
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
|
|
||||||
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
|
|
||||||
|
|
||||||
type putMessageRequest struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessage"`
|
|
||||||
MessageText string `xml:"MessageText"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutMessageParameters is the set of options can be specified for Put Messsage
|
|
||||||
// operation. A zero struct does not use any preferences for the request.
|
|
||||||
type PutMessageParameters struct {
|
|
||||||
VisibilityTimeout int
|
|
||||||
MessageTTL int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p PutMessageParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
if p.VisibilityTimeout != 0 {
|
|
||||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
if p.MessageTTL != 0 {
|
|
||||||
out.Set("messagettl", strconv.Itoa(p.MessageTTL))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessagesParameters is the set of options can be specified for Get
|
|
||||||
// Messsages operation. A zero struct does not use any preferences for the
|
|
||||||
// request.
|
|
||||||
type GetMessagesParameters struct {
|
|
||||||
NumOfMessages int
|
|
||||||
VisibilityTimeout int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p GetMessagesParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
if p.NumOfMessages != 0 {
|
|
||||||
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
|
|
||||||
}
|
|
||||||
if p.VisibilityTimeout != 0 {
|
|
||||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessagesParameters is the set of options can be specified for Peek
|
|
||||||
// Messsage operation. A zero struct does not use any preferences for the
|
|
||||||
// request.
|
|
||||||
type PeekMessagesParameters struct {
|
|
||||||
NumOfMessages int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p PeekMessagesParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{"peekonly": {"true"}} // Required for peek operation
|
|
||||||
if p.NumOfMessages != 0 {
|
|
||||||
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateMessageParameters is the set of options can be specified for Update Messsage
|
|
||||||
// operation. A zero struct does not use any preferences for the request.
|
|
||||||
type UpdateMessageParameters struct {
|
|
||||||
PopReceipt string
|
|
||||||
VisibilityTimeout int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p UpdateMessageParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
if p.PopReceipt != "" {
|
|
||||||
out.Set("popreceipt", p.PopReceipt)
|
|
||||||
}
|
|
||||||
if p.VisibilityTimeout != 0 {
|
|
||||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessagesResponse represents a response returned from Get Messages
|
|
||||||
// operation.
|
|
||||||
type GetMessagesResponse struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
|
||||||
QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessageResponse represents a QueueMessage object returned from Get
|
|
||||||
// Messages operation response.
|
|
||||||
type GetMessageResponse struct {
|
|
||||||
MessageID string `xml:"MessageId"`
|
|
||||||
InsertionTime string `xml:"InsertionTime"`
|
|
||||||
ExpirationTime string `xml:"ExpirationTime"`
|
|
||||||
PopReceipt string `xml:"PopReceipt"`
|
|
||||||
TimeNextVisible string `xml:"TimeNextVisible"`
|
|
||||||
DequeueCount int `xml:"DequeueCount"`
|
|
||||||
MessageText string `xml:"MessageText"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessagesResponse represents a response returned from Get Messages
|
|
||||||
// operation.
|
|
||||||
type PeekMessagesResponse struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
|
||||||
QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessageResponse represents a QueueMessage object returned from Peek
|
|
||||||
// Messages operation response.
|
|
||||||
type PeekMessageResponse struct {
|
|
||||||
MessageID string `xml:"MessageId"`
|
|
||||||
InsertionTime string `xml:"InsertionTime"`
|
|
||||||
ExpirationTime string `xml:"ExpirationTime"`
|
|
||||||
DequeueCount int `xml:"DequeueCount"`
|
|
||||||
MessageText string `xml:"MessageText"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueueMetadataResponse represents user defined metadata and queue
|
|
||||||
// properties on a specific queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
|
|
||||||
type QueueMetadataResponse struct {
|
|
||||||
ApproximateMessageCount int
|
|
||||||
UserDefinedMetadata map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata operation sets user-defined metadata on the specified queue.
|
|
||||||
// Metadata is associated with the queue as name-value pairs.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
|
|
||||||
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
|
|
||||||
metadata = c.client.protectUserAgent(metadata)
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
for k, v := range metadata {
|
|
||||||
headers[userDefinedMetadataHeaderPrefix+k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetadata operation retrieves user-defined metadata and queue
|
|
||||||
// properties on the specified queue. Metadata is associated with
|
|
||||||
// the queue as name-values pairs.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
|
|
||||||
//
|
|
||||||
// Because the way Golang's http client (and http.Header in particular)
|
|
||||||
// canonicalize header names, the returned metadata names would always
|
|
||||||
// be all lower case.
|
|
||||||
func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
|
|
||||||
qm := QueueMetadataResponse{}
|
|
||||||
qm.UserDefinedMetadata = make(map[string]string)
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return qm, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
for k, v := range resp.headers {
|
|
||||||
if len(v) != 1 {
|
|
||||||
return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
|
|
||||||
}
|
|
||||||
|
|
||||||
value := v[0]
|
|
||||||
|
|
||||||
if k == approximateMessagesCountHeader {
|
|
||||||
qm.ApproximateMessageCount, err = strconv.Atoi(value)
|
|
||||||
if err != nil {
|
|
||||||
return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
|
|
||||||
}
|
|
||||||
} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
|
|
||||||
name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
|
|
||||||
qm.UserDefinedMetadata[strings.ToLower(name)] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateQueue operation creates a queue under the given account.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
|
|
||||||
func (c QueueServiceClient) CreateQueue(name string) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteQueue operation permanently deletes the specified queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
|
|
||||||
func (c QueueServiceClient) DeleteQueue(name string) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
|
||||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueueExists returns true if a queue with given name exists.
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
	// 200 means the queue exists; 404 means it definitively does not.
	// Both are successful lookups, so err is deliberately ignored for them.
	// NOTE(review): resp.body is never closed on this path — confirm exec
	// drains/closes bodies for error statuses, otherwise this leaks the
	// connection.
	if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
		return resp.statusCode == http.StatusOK, nil
	}

	// Any other outcome (transport failure or unexpected status) surfaces
	// whatever error exec produced.
	return false, err
}
|
|
||||||
|
|
||||||
// PutMessage operation adds a new message to the back of the message queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
|
|
||||||
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
|
||||||
req := putMessageRequest{MessageText: message}
|
|
||||||
body, nn, err := xmlMarshal(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
headers["Content-Length"] = strconv.Itoa(nn)
|
|
||||||
resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearMessages operation deletes all messages from the specified queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
|
|
||||||
func (c QueueServiceClient) ClearMessages(queue string) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
|
|
||||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessages operation retrieves one or more messages from the front of the
|
|
||||||
// queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
|
|
||||||
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
|
|
||||||
var r GetMessagesResponse
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
err = xmlUnmarshal(resp.body, &r)
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessages retrieves one or more messages from the front of the queue, but
|
|
||||||
// does not alter the visibility of the message.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
|
|
||||||
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
|
|
||||||
var r PeekMessagesResponse
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
err = xmlUnmarshal(resp.body, &r)
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteMessage operation deletes the specified message.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
|
||||||
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
|
|
||||||
"popreceipt": {popReceipt}})
|
|
||||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateMessage operation deletes the specified message.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx
|
|
||||||
func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters())
|
|
||||||
req := putMessageRequest{MessageText: message}
|
|
||||||
body, nn, err := xmlMarshal(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%d", nn)
|
|
||||||
resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
|
@ -1,254 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AzureTable is the typedef of the Azure Table name
type AzureTable string

const (
	// tablesURIPath is the URI sub-path addressing the Tables collection
	// resource of the account.
	tablesURIPath = "/Tables"
)

// createTableRequest is the JSON payload sent to the Create Table operation.
type createTableRequest struct {
	TableName string `json:"TableName"`
}

// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool
	CanAppend  bool
	CanUpdate  bool
	CanDelete  bool
}
|
|
||||||
|
|
||||||
// pathForTable returns the URI path addressing the given table.
// A direct string conversion replaces the previous fmt.Sprintf("%s", table),
// which allocated needlessly and is flagged by go vet/staticcheck (S1025).
func pathForTable(table AzureTable) string { return string(table) }
|
|
||||||
|
|
||||||
func (c *TableServiceClient) getStandardHeaders() map[string]string {
|
|
||||||
return map[string]string{
|
|
||||||
"x-ms-version": "2015-02-21",
|
|
||||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
|
||||||
"Accept": "application/json;odata=nometadata",
|
|
||||||
"Accept-Charset": "UTF-8",
|
|
||||||
"Content-Type": "application/json",
|
|
||||||
userAgentHeader: c.client.userAgent,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryTables returns the tables created in the
// *TableServiceClient storage account.
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
	uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})

	headers := c.getStandardHeaders()
	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		// Drain the body so the underlying connection can be reused;
		// the read result is intentionally discarded.
		ioutil.ReadAll(resp.body)
		return nil, err
	}

	// Buffer the whole JSON payload before decoding.
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(resp.body); err != nil {
		return nil, err
	}

	var respArray queryTablesResponse
	if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
		return nil, err
	}

	// Flatten {"value":[{"TableName":...}]} into a slice of table names.
	s := make([]AzureTable, len(respArray.TableName))
	for i, elem := range respArray.TableName {
		s[i] = AzureTable(elem.TableName)
	}

	return s, nil
}
|
|
||||||
|
|
||||||
// CreateTable creates the table given the specific
|
|
||||||
// name. This function fails if the name is not compliant
|
|
||||||
// with the specification or the tables already exists.
|
|
||||||
func (c *TableServiceClient) CreateTable(table AzureTable) error {
|
|
||||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
|
||||||
|
|
||||||
req := createTableRequest{TableName: string(table)}
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
if err := json.NewEncoder(buf).Encode(req); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
|
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteTable deletes the table given the specific
|
|
||||||
// name. This function fails if the table is not present.
|
|
||||||
// Be advised: DeleteTable deletes all the entries
|
|
||||||
// that may be present.
|
|
||||||
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
|
|
||||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
|
||||||
uri += fmt.Sprintf("('%s')", string(table))
|
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
|
||||||
|
|
||||||
headers["Content-Length"] = "0"
|
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
|
||||||
return err
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL
|
|
||||||
func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) {
|
|
||||||
params := url.Values{"comp": {"acl"}}
|
|
||||||
|
|
||||||
if timeout > 0 {
|
|
||||||
params.Add("timeout", fmt.Sprint(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.client.getEndpoint(tableServiceName, string(table), params)
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
|
|
||||||
body, length, err := generateTableACLPayload(policies)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
|
|
||||||
sil := SignedIdentifiers{
|
|
||||||
SignedIdentifiers: []SignedIdentifier{},
|
|
||||||
}
|
|
||||||
for _, tap := range policies {
|
|
||||||
permission := generateTablePermissions(&tap)
|
|
||||||
signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
|
|
||||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
|
||||||
}
|
|
||||||
return xmlMarshal(sil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl
func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) {
	params := url.Values{"comp": {"acl"}}

	if timeout > 0 {
		params.Add("timeout", strconv.Itoa(timeout))
	}

	uri := c.client.getEndpoint(tableServiceName, string(table), params)
	headers := c.client.getStandardHeaders()
	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		// Drain the body so the connection can be reused; the result is
		// intentionally discarded.
		ioutil.ReadAll(resp.body)
		return nil, err
	}

	// Decode the XML ACL into the internal AccessPolicy structure, then
	// convert it to the public TableAccessPolicy form.
	var ap AccessPolicy
	err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	out := updateTableAccessPolicy(ap)
	return out, nil
}
|
|
||||||
|
|
||||||
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
|
||||||
out := []TableAccessPolicy{}
|
|
||||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
|
||||||
tap := TableAccessPolicy{
|
|
||||||
ID: policy.ID,
|
|
||||||
StartTime: policy.AccessPolicy.StartTime,
|
|
||||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
|
||||||
}
|
|
||||||
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
|
||||||
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
|
|
||||||
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
|
||||||
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
|
||||||
|
|
||||||
out = append(out, tap)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
|
|
||||||
// generate the permissions string (raud).
|
|
||||||
// still want the end user API to have bool flags.
|
|
||||||
permissions = ""
|
|
||||||
|
|
||||||
if tap.CanRead {
|
|
||||||
permissions += "r"
|
|
||||||
}
|
|
||||||
|
|
||||||
if tap.CanAppend {
|
|
||||||
permissions += "a"
|
|
||||||
}
|
|
||||||
|
|
||||||
if tap.CanUpdate {
|
|
||||||
permissions += "u"
|
|
||||||
}
|
|
||||||
|
|
||||||
if tap.CanDelete {
|
|
||||||
permissions += "d"
|
|
||||||
}
|
|
||||||
return permissions
|
|
||||||
}
|
|
|
@ -1,354 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Annotating as secure for gas scanning
/* #nosec */
const (
	partitionKeyNode = "PartitionKey"
	rowKeyNode       = "RowKey"
	// tag/tagIgnore: struct fields tagged `table:"-"` are stripped from
	// the serialized entity.
	tag       = "table"
	tagIgnore = "-"
	// Continuation token response headers (canonicalized by net/http).
	continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
	continuationTokenRowHeader          = "X-Ms-Continuation-Nextrowkey"
	// maxTopParameter is the largest $top the Azure Table API accepts.
	maxTopParameter = 1000
)

// queryTablesResponse mirrors the JSON body of the Query Tables operation:
// {"value": [{"TableName": ...}, ...]}.
type queryTablesResponse struct {
	TableName []struct {
		TableName string `json:"TableName"`
	} `json:"value"`
}

// NOTE(review): the tableOperationType* constants and the tableOperation
// type are not referenced anywhere in this file — confirm before removal.
const (
	tableOperationTypeInsert          = iota
	tableOperationTypeUpdate          = iota
	tableOperationTypeMerge           = iota
	tableOperationTypeInsertOrReplace = iota
	tableOperationTypeInsertOrMerge   = iota
)

type tableOperation int

// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
	PartitionKey() string
	RowKey() string
	SetPartitionKey(string) error
	SetRowKey(string) error
}

// ContinuationToken is an opaque (ie not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}

// getTableEntriesResponse mirrors the JSON body of Query Entities:
// {"value": [ {...entity...}, ... ]}.
type getTableEntriesResponse struct {
	Elements []map[string]interface{} `json:"value"`
}
|
|
||||||
|
|
||||||
// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// top parameter limits the returned entries up to top. Maximum top
// allowed by Azure API is 1000. In case there are more than top entries to be
// returned the function will return a non nil *ContinuationToken. You can call the
// same function again passing the received ContinuationToken as previousContToken
// parameter in order to get the following entries. The query parameter
// is the odata query. To retrieve all the entries pass the empty string.
// The function returns a pointer to a TableEntity slice, the *ContinuationToken
// if there are more entries to be returned and an error in case something went
// wrong.
//
// Example:
// 		entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
	if top > maxTopParameter {
		return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
	}

	uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
	uri += fmt.Sprintf("?$top=%d", top)
	if query != "" {
		uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
	}

	if previousContToken != nil {
		// NOTE(review): the continuation values are interpolated without
		// URL escaping — confirm the service never returns keys that need it.
		uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
	}

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)

	if err != nil {
		return nil, nil, err
	}

	// Pull the continuation token out of the response headers before the
	// body is consumed; nil when there are no further pages.
	contToken := extractContinuationTokenFromHeaders(resp.headers)

	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, contToken, err
	}

	// Decode the JSON payload into concrete instances of retType.
	retEntries, err := deserializeEntity(retType, resp.body)
	if err != nil {
		return nil, contToken, err
	}

	return retEntries, contToken, nil
}
|
|
||||||
|
|
||||||
// InsertEntity inserts an entity in the specified table.
|
|
||||||
// The function fails if there is an entity with the same
|
|
||||||
// PartitionKey and RowKey in the table.
|
|
||||||
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
|
|
||||||
sc, err := c.execTable(table, entity, false, http.MethodPost)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkRespCode(sc, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// execTable serializes entity (with PartitionKey and RowKey injected) and
// sends it to the table using the given HTTP method, optionally addressing
// the specific entity in the URL. It returns the HTTP status code; status
// interpretation is left to the caller.
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
	uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
	if specifyKeysInURL {
		// Entity-addressed form: table(PartitionKey='pk',RowKey='rk').
		uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
	}

	headers := c.getStandardHeaders()

	var buf bytes.Buffer

	// Serialize the entity, adding the PartitionKey/RowKey nodes and
	// stripping fields tagged `table:"-"`.
	if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
		return 0, err
	}

	headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())

	resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth)

	if err != nil {
		return 0, err
	}

	defer resp.body.Close()

	return resp.statusCode, nil
}
|
|
||||||
|
|
||||||
// UpdateEntity updates the contents of an entity with the
|
|
||||||
// one passed as parameter. The function fails if there is no entity
|
|
||||||
// with the same PartitionKey and RowKey in the table.
|
|
||||||
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
|
|
||||||
sc, err := c.execTable(table, entity, true, http.MethodPut)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeEntity merges the contents of an entity with the
|
|
||||||
// one passed as parameter.
|
|
||||||
// The function fails if there is no entity
|
|
||||||
// with the same PartitionKey and RowKey in the table.
|
|
||||||
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
|
|
||||||
sc, err := c.execTable(table, entity, true, "MERGE")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
	// "*" matches any ETag, making the delete unconditional.
	return c.DeleteEntity(table, entity, "*")
}
|
|
||||||
|
|
||||||
// DeleteEntity deletes the entity matching by
|
|
||||||
// PartitionKey, RowKey and ifMatch field.
|
|
||||||
// The function fails if there is no entity
|
|
||||||
// with the same PartitionKey and RowKey in the table or
|
|
||||||
// the ifMatch is different.
|
|
||||||
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
|
|
||||||
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
|
|
||||||
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
|
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
|
||||||
|
|
||||||
headers["Content-Length"] = "0"
|
|
||||||
headers["If-Match"] = ifMatch
|
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrReplaceEntity inserts an entity in the specified table
|
|
||||||
// or replaced the existing one.
|
|
||||||
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
|
|
||||||
sc, err := c.execTable(table, entity, true, http.MethodPut)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrMergeEntity inserts an entity in the specified table
|
|
||||||
// or merges the existing one.
|
|
||||||
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
|
|
||||||
sc, err := c.execTable(table, entity, true, "MERGE")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// injectPartitionAndRowKeys JSON-encodes entity into buf, adding the
// PartitionKey and RowKey nodes and removing any field tagged `table:"-"`.
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
	if err := json.NewEncoder(buf).Encode(entity); err != nil {
		return err
	}

	// Round-trip through a generic map so keys can be added and removed.
	dec := make(map[string]interface{})
	if err := json.NewDecoder(buf).Decode(&dec); err != nil {
		return err
	}

	// Inject PartitionKey and RowKey
	dec[partitionKeyNode] = entity.PartitionKey()
	dec[rowKeyNode] = entity.RowKey()

	// Remove tagged fields
	// The tag is defined in the const section
	// This is useful to avoid storing the PartitionKey and RowKey twice.
	numFields := reflect.ValueOf(entity).Elem().NumField()
	for i := 0; i < numFields; i++ {
		f := reflect.ValueOf(entity).Elem().Type().Field(i)

		if f.Tag.Get(tag) == tagIgnore {
			// we must look for its JSON name in the dictionary
			// as the user can rename it using a tag
			// NOTE(review): a tag like `json:"name,omitempty"` is used
			// verbatim — the ",omitempty" option is not stripped, so the
			// delete below would miss the real key. Confirm callers never
			// combine `table:"-"` with json tag options.
			jsonName := f.Name
			if f.Tag.Get("json") != "" {
				jsonName = f.Tag.Get("json")
			}
			delete(dec, jsonName)
		}
	}

	buf.Reset()

	if err := json.NewEncoder(buf).Encode(&dec); err != nil {
		return err
	}

	return nil
}
|
|
||||||
|
|
||||||
// deserializeEntity decodes the Query Entities JSON payload from reader into
// a slice of freshly-allocated instances of retType (a pointer type whose
// element implements TableEntity), restoring each entity's PartitionKey and
// RowKey via the TableEntity setters.
func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
	buf := new(bytes.Buffer)

	var ret getTableEntriesResponse
	if err := json.NewDecoder(reader).Decode(&ret); err != nil {
		return nil, err
	}

	tEntries := make([]TableEntity, len(ret.Elements))

	for i, entry := range ret.Elements {

		// Round-trip each element through the buffer so keys can be
		// inspected and stripped before decoding into the typed entity.
		buf.Reset()
		if err := json.NewEncoder(buf).Encode(entry); err != nil {
			return nil, err
		}

		dec := make(map[string]interface{})
		if err := json.NewDecoder(buf).Decode(&dec); err != nil {
			return nil, err
		}

		var pKey, rKey string
		// strip pk and rk
		// NOTE(review): val.(string) panics if the service ever returns a
		// non-string PartitionKey/RowKey — confirm that cannot happen.
		for key, val := range dec {
			switch key {
			case partitionKeyNode:
				pKey = val.(string)
			case rowKeyNode:
				rKey = val.(string)
			}
		}

		delete(dec, partitionKeyNode)
		delete(dec, rowKeyNode)

		buf.Reset()
		if err := json.NewEncoder(buf).Encode(dec); err != nil {
			return nil, err
		}

		// Create an empty retType instance
		tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
		// Populate it with the values
		if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
			return nil, err
		}

		// Reset PartitionKey and RowKey
		if err := tEntries[i].SetPartitionKey(pKey); err != nil {
			return nil, err
		}
		if err := tEntries[i].SetRowKey(rKey); err != nil {
			return nil, err
		}
	}

	return tEntries, nil
}
|
|
||||||
|
|
||||||
func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
|
|
||||||
ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}
|
|
||||||
|
|
||||||
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
|
|
||||||
return &ct
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,20 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client         // underlying storage client (endpoint + signing)
	auth   authentication // authentication scheme used on each request
}

// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return c.client.getServiceProperties(tableServiceName, c.auth)
}

// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	return c.client.setServiceProperties(props, tableServiceName, c.auth)
}
|
|
|
@ -1,85 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// computeHmac256 signs message with the client's account key using
// HMAC-SHA256 and returns the base64-encoded MAC (SharedKey authorization).
func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
|
|
||||||
|
|
||||||
// currentTimeRfc1123Formatted returns the current UTC time formatted for the
// x-ms-date request header.
func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}

// timeRfc1123Formatted formats t with the HTTP date layout
// ("Mon, 02 Jan 2006 15:04:05 GMT").
func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}
|
|
||||||
|
|
||||||
func mergeParams(v1, v2 url.Values) url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
for k, v := range v1 {
|
|
||||||
out[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range v2 {
|
|
||||||
vals, ok := out[k]
|
|
||||||
if ok {
|
|
||||||
vals = append(vals, v...)
|
|
||||||
out[k] = vals
|
|
||||||
} else {
|
|
||||||
out[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareBlockListRequest(blocks []Block) string {
|
|
||||||
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
|
|
||||||
for _, v := range blocks {
|
|
||||||
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
|
|
||||||
}
|
|
||||||
s += `</BlockList>`
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func xmlUnmarshal(body io.Reader, v interface{}) error {
|
|
||||||
data, err := ioutil.ReadAll(body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return xml.Unmarshal(data, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func xmlMarshal(v interface{}) (io.Reader, int, error) {
|
|
||||||
b, err := xml.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
return bytes.NewReader(b), len(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func headersFromStruct(v interface{}) map[string]string {
|
|
||||||
headers := make(map[string]string)
|
|
||||||
value := reflect.ValueOf(v)
|
|
||||||
for i := 0; i < value.NumField(); i++ {
|
|
||||||
key := value.Type().Field(i).Tag.Get("header")
|
|
||||||
val := value.Field(i).String()
|
|
||||||
if key != "" && val != "" {
|
|
||||||
headers[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
|
@ -0,0 +1,77 @@
|
||||||
|
package autorest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE: the GetBody() method on the http.Request object is new in 1.8.
|
||||||
|
// at present we support 1.7 and 1.8 so for now the branches specific
|
||||||
|
// to 1.8 have been commented out.
|
||||||
|
|
||||||
|
// RetriableRequest provides facilities for retrying an HTTP request.
|
||||||
|
type RetriableRequest struct {
|
||||||
|
req *http.Request
|
||||||
|
//rc io.ReadCloser
|
||||||
|
br *bytes.Reader
|
||||||
|
reset bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic.
|
||||||
|
func NewRetriableRequest(req *http.Request) *RetriableRequest {
|
||||||
|
return &RetriableRequest{req: req}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request returns the wrapped HTTP request.
|
||||||
|
func (rr *RetriableRequest) Request() *http.Request {
|
||||||
|
return rr.req
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare signals that the request is about to be sent.
|
||||||
|
func (rr *RetriableRequest) Prepare() (err error) {
|
||||||
|
// preserve the request body; this is to support retry logic as
|
||||||
|
// the underlying transport will always close the reqeust body
|
||||||
|
if rr.req.Body != nil {
|
||||||
|
if rr.reset {
|
||||||
|
/*if rr.rc != nil {
|
||||||
|
rr.req.Body = rr.rc
|
||||||
|
} else */if rr.br != nil {
|
||||||
|
_, err = rr.br.Seek(0, io.SeekStart)
|
||||||
|
}
|
||||||
|
rr.reset = false
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/*if rr.req.GetBody != nil {
|
||||||
|
// this will allow us to preserve the body without having to
|
||||||
|
// make a copy. note we need to do this on each iteration
|
||||||
|
rr.rc, err = rr.req.GetBody()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else */if rr.br == nil {
|
||||||
|
// fall back to making a copy (only do this once)
|
||||||
|
b := []byte{}
|
||||||
|
if rr.req.ContentLength > 0 {
|
||||||
|
b = make([]byte, rr.req.ContentLength)
|
||||||
|
_, err = io.ReadFull(rr.req.Body, b)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
b, err = ioutil.ReadAll(rr.req.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rr.br = bytes.NewReader(b)
|
||||||
|
rr.req.Body = ioutil.NopCloser(rr.br)
|
||||||
|
}
|
||||||
|
// indicates that the request body needs to be reset
|
||||||
|
rr.reset = true
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
|
@ -1,9 +1,7 @@
|
||||||
package autorest
|
package autorest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
"log"
|
||||||
"math"
|
"math"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -175,8 +173,13 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...
|
||||||
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
|
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
|
||||||
return func(s Sender) Sender {
|
return func(s Sender) Sender {
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||||
|
rr := NewRetriableRequest(r)
|
||||||
for attempt := 0; attempt < attempts; attempt++ {
|
for attempt := 0; attempt < attempts; attempt++ {
|
||||||
resp, err = s.Do(r)
|
err = rr.Prepare()
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
resp, err = s.Do(rr.Request())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
@ -194,19 +197,15 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
|
||||||
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
|
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
|
||||||
return func(s Sender) Sender {
|
return func(s Sender) Sender {
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||||
b := []byte{}
|
rr := NewRetriableRequest(r)
|
||||||
if r.Body != nil {
|
|
||||||
b, err = ioutil.ReadAll(r.Body)
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Increment to add the first call (attempts denotes number of retries)
|
// Increment to add the first call (attempts denotes number of retries)
|
||||||
attempts++
|
attempts++
|
||||||
for attempt := 0; attempt < attempts; attempt++ {
|
for attempt := 0; attempt < attempts; attempt++ {
|
||||||
r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
|
err = rr.Prepare()
|
||||||
resp, err = s.Do(r)
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
resp, err = s.Do(rr.Request())
|
||||||
if err != nil || !ResponseHasStatusCode(resp, codes...) {
|
if err != nil || !ResponseHasStatusCode(resp, codes...) {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
@ -224,9 +223,14 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
|
||||||
func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
|
func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
|
||||||
return func(s Sender) Sender {
|
return func(s Sender) Sender {
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||||
|
rr := NewRetriableRequest(r)
|
||||||
end := time.Now().Add(d)
|
end := time.Now().Add(d)
|
||||||
for attempt := 0; time.Now().Before(end); attempt++ {
|
for attempt := 0; time.Now().Before(end); attempt++ {
|
||||||
resp, err = s.Do(r)
|
err = rr.Prepare()
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
resp, err = s.Do(rr.Request())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -55,10 +55,10 @@
|
||||||
"revisionTime": "2017-06-16T14:00:43Z"
|
"revisionTime": "2017-06-16T14:00:43Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "upfBL0B4wGsgM+xg1QhZvFgO7PU=",
|
"checksumSHA1": "ZQkLlysjzw2rgUe/U0Zz7oMvjw8=",
|
||||||
"path": "github.com/Azure/azure-storage-go",
|
"path": "github.com/Azure/azure-sdk-for-go/storage",
|
||||||
"revision": "32cfbe17a139c17f84be16bdf8f9c45c840a046b",
|
"revision": "720c2a183bcccbe8a90f3159b63e1e2e5d4c9911",
|
||||||
"revisionTime": "2017-05-08T19:00:56Z"
|
"revisionTime": "2017-06-14T20:08:54Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "23FJUX+AInYeEM2hoUMvYZtXZd4=",
|
"checksumSHA1": "23FJUX+AInYeEM2hoUMvYZtXZd4=",
|
||||||
|
|
Loading…
Reference in New Issue