Merge branch 'master' into ui-input-styles

This commit is contained in:
Joshua Ogle 2018-07-09 13:22:23 -06:00 committed by GitHub
commit 28c782bd70
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1725 changed files with 191702 additions and 77301 deletions

View File

@ -1,7 +1,14 @@
## Next
IMPROVEMENTS:
* core: A `max_request_size` parameter can now be set per-listener to adjust
the maximum allowed size per request [GH-4824]
BUG FIXES:
* core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is
sealed [GH-4874]
* secrets/database: Fix panic during DB creds revocation [GH-4846]
## 0.10.3 (June 20th, 2018)

View File

@ -35,7 +35,7 @@ import (
auditSocket "github.com/hashicorp/vault/builtin/audit/socket"
auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog"
credAzure "github.com/hashicorp/vault-plugin-auth-azure/plugin"
credAzure "github.com/hashicorp/vault-plugin-auth-azure"
credCentrify "github.com/hashicorp/vault-plugin-auth-centrify"
credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin"
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"

View File

@ -182,8 +182,8 @@ func (t TableFormatter) printWarnings(ui cli.Ui, secret *api.Secret) {
ui.Warn("WARNING! The following warnings were returned from Vault:\n")
for _, warning := range secret.Warnings {
ui.Warn(wrapAtLengthWithPadding(fmt.Sprintf("* %s", warning), 2))
ui.Warn("")
}
ui.Warn("")
}
}

View File

@ -395,7 +395,7 @@ func (c *OperatorGenerateRootCommand) provide(client *api.Client, key string, dr
nonce = status.Nonce
w := getWriterFromUI(c.UI)
fmt.Fprintf(w, "Root generation operation nonce: %s\n", nonce)
fmt.Fprintf(w, "Operation nonce: %s\n", nonce)
fmt.Fprintf(w, "Unseal Key (will be hidden): ")
key, err = password.Read(os.Stdin)
fmt.Fprintf(w, "\n")
@ -489,10 +489,10 @@ func (c *OperatorGenerateRootCommand) printStatus(status *api.GenerateRootStatus
out = append(out, fmt.Sprintf("PGP Fingerprint | %s", status.PGPFingerprint))
}
switch {
case status.EncodedRootToken != "":
out = append(out, fmt.Sprintf("Root Token | %s", status.EncodedRootToken))
case status.EncodedToken != "":
out = append(out, fmt.Sprintf("Root Token | %s", status.EncodedToken))
out = append(out, fmt.Sprintf("Encoded Token | %s", status.EncodedToken))
case status.EncodedRootToken != "":
out = append(out, fmt.Sprintf("Encoded Root Token | %s", status.EncodedRootToken))
}
output := columnOutput(out, nil)

View File

@ -346,7 +346,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Errorf("expected %d to be %d", code, exp)
}
reToken := regexp.MustCompile(`Root Token\s+(.+)`)
reToken := regexp.MustCompile(`Encoded Token\s+(.+)`)
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
match := reToken.FindAllStringSubmatch(combined, -1)
if len(match) < 1 || len(match[0]) < 2 {
@ -421,7 +421,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Errorf("expected %d to be %d", code, exp)
}
reToken := regexp.MustCompile(`Root Token\s+(.+)`)
reToken := regexp.MustCompile(`Encoded Token\s+(.+)`)
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
match := reToken.FindAllStringSubmatch(combined, -1)
if len(match) < 1 || len(match[0]) < 2 {

View File

@ -97,7 +97,8 @@ type ServerCommand struct {
type ServerListener struct {
net.Listener
config map[string]interface{}
config map[string]interface{}
maxRequestSize int64
}
func (c *ServerCommand) Synopsis() string {
@ -689,11 +690,6 @@ CLUSTER_SYNTHESIS_COMPLETE:
return 1
}
lns = append(lns, ServerListener{
Listener: ln,
config: lnConfig.Config,
})
if reloadFunc != nil {
relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
relSlice = append(relSlice, reloadFunc)
@ -728,6 +724,26 @@ CLUSTER_SYNTHESIS_COMPLETE:
props["cluster address"] = addr
}
var maxRequestSize int64 = vaulthttp.DefaultMaxRequestSize
if valRaw, ok := lnConfig.Config["max_request_size"]; ok {
val, err := parseutil.ParseInt(valRaw)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse max_request_size value %v", valRaw))
return 1
}
if val >= 0 {
maxRequestSize = val
}
}
props["max_request_size"] = fmt.Sprintf("%d", maxRequestSize)
lns = append(lns, ServerListener{
Listener: ln,
config: lnConfig.Config,
maxRequestSize: maxRequestSize,
})
// Store the listener props for output later
key := fmt.Sprintf("listener %d", i+1)
propsList := make([]string, 0, len(props))
@ -792,7 +808,9 @@ CLUSTER_SYNTHESIS_COMPLETE:
// This needs to happen before we first unseal, so before we trigger dev
// mode if it's set
core.SetClusterListenerAddrs(clusterAddrs)
core.SetClusterHandler(vaulthttp.Handler(core))
core.SetClusterHandler(vaulthttp.Handler(&vault.HandlerProperties{
Core: core,
}))
err = core.UnsealWithStoredKeys(context.Background())
if err != nil {
@ -925,7 +943,10 @@ CLUSTER_SYNTHESIS_COMPLETE:
// Initialize the HTTP servers
for _, ln := range lns {
handler := vaulthttp.Handler(core)
handler := vaulthttp.Handler(&vault.HandlerProperties{
Core: core,
MaxRequestSize: ln.maxRequestSize,
})
// We performed validation on the config earlier, so we can just cast here
if _, ok := ln.config["x_forwarded_for_authorized_addrs"]; ok {
@ -1195,7 +1216,9 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m
c.UI.Output("")
for _, core := range testCluster.Cores {
core.Server.Handler = vaulthttp.Handler(core.Core)
core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{
Core: core.Core,
})
core.SetClusterHandler(core.Server.Handler)
}

View File

@ -804,6 +804,7 @@ func parseListeners(result *Config, list *ast.ObjectList) error {
"x_forwarded_for_reject_not_authorized",
"x_forwarded_for_reject_not_present",
"infrastructure",
"max_request_size",
"node_id",
"proxy_protocol_behavior",
"proxy_protocol_authorized_addrs",

View File

@ -10,6 +10,7 @@ import (
"crypto/sha1"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"math/big"
"strconv"
@ -273,3 +274,28 @@ func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) {
return false, fmt.Errorf("cannot compare key with type %T", key1Iface)
}
}
// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs
func ParsePublicKeyPEM(data []byte) (interface{}, error) {
block, data := pem.Decode(data)
if block != nil {
var rawKey interface{}
var err error
if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
rawKey = cert.PublicKey
} else {
return nil, err
}
}
if rsaPublicKey, ok := rawKey.(*rsa.PublicKey); ok {
return rsaPublicKey, nil
}
if ecPublicKey, ok := rawKey.(*ecdsa.PublicKey); ok {
return ecPublicKey, nil
}
}
return nil, errors.New("data does not contain any valid RSA or ECDSA public keys")
}
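A minimal sketch of calling the new helper, assuming the helper/certutil import path used by this version of the repository. It generates a throwaway ECDSA key just to have a PEM block for ParsePublicKeyPEM to consume, then type-switches on the result the way callers would:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/certutil"
)

func main() {
	// Generate a throwaway ECDSA key and PEM-encode its public half so there
	// is something valid to parse.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der})

	key, err := certutil.ParsePublicKeyPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}

	// ParsePublicKeyPEM returns either an *rsa.PublicKey or an *ecdsa.PublicKey.
	switch k := key.(type) {
	case *rsa.PublicKey:
		fmt.Println("parsed RSA public key,", k.N.BitLen(), "bits")
	case *ecdsa.PublicKey:
		fmt.Println("parsed ECDSA public key on curve", k.Curve.Params().Name)
	}
}
```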

View File

@ -4,6 +4,9 @@ import (
"bytes"
"crypto/tls"
"crypto/x509"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -56,11 +59,30 @@ func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request
}
func GenerateForwardedRequest(req *http.Request) (*Request, error) {
var reader io.Reader = req.Body
ctx := req.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
reader = io.LimitReader(req.Body, max)
}
}
body, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
fq := Request{
Method: req.Method,
HeaderEntries: make(map[string]*HeaderEntry, len(req.Header)),
Host: req.Host,
RemoteAddr: req.RemoteAddr,
Body: body,
}
reqURL := req.URL
@ -80,13 +102,6 @@ func GenerateForwardedRequest(req *http.Request) (*Request, error) {
}
}
buf := bytes.NewBuffer(nil)
_, err := buf.ReadFrom(req.Body)
if err != nil {
return nil, err
}
fq.Body = buf.Bytes()
if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
for i, cert := range req.TLS.PeerCertificates {

View File

@ -24,7 +24,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// First: test reject not present
t.Run("reject_not_present", func(t *testing.T) {
t.Parallel()
testHandler := func(c *vault.Core) http.Handler {
testHandler := func(props *vault.HandlerProperties) http.Handler {
origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
@ -69,7 +69,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// Next: test allow unauth
t.Run("allow_unauth", func(t *testing.T) {
t.Parallel()
testHandler := func(c *vault.Core) http.Handler {
testHandler := func(props *vault.HandlerProperties) http.Handler {
origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
@ -106,7 +106,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// Next: test fail unauth
t.Run("fail_unauth", func(t *testing.T) {
t.Parallel()
testHandler := func(c *vault.Core) http.Handler {
testHandler := func(props *vault.HandlerProperties) http.Handler {
origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
@ -140,7 +140,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// Next: test bad hops (too many)
t.Run("too_many_hops", func(t *testing.T) {
t.Parallel()
testHandler := func(c *vault.Core) http.Handler {
testHandler := func(props *vault.HandlerProperties) http.Handler {
origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
@ -174,7 +174,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// Next: test picking correct value
t.Run("correct_hop_skipping", func(t *testing.T) {
t.Parallel()
testHandler := func(c *vault.Core) http.Handler {
testHandler := func(props *vault.HandlerProperties) http.Handler {
origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
@ -211,7 +211,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// Next: multi-header approach
t.Run("correct_hop_skipping_multi_header", func(t *testing.T) {
t.Parallel()
testHandler := func(c *vault.Core) http.Handler {
testHandler := func(props *vault.HandlerProperties) http.Handler {
origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))

View File

@ -1,7 +1,9 @@
package http
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
@ -52,10 +54,11 @@ const (
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
// MaxRequestSize is the maximum accepted request size. This is to prevent
// a denial of service attack where no Content-Length is provided and the server
// is fed ever more data until it exhausts memory.
MaxRequestSize = 32 * 1024 * 1024
// DefaultMaxRequestSize is the default maximum accepted request size. This
// is to prevent a denial of service attack where no Content-Length is
// provided and the server is fed ever more data until it exhausts memory.
// Can be overridden per listener.
DefaultMaxRequestSize = 32 * 1024 * 1024
)
var (
@ -67,7 +70,9 @@ var (
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
func Handler(core *vault.Core) http.Handler {
func Handler(props *vault.HandlerProperties) http.Handler {
core := props.Core
// Create the muxer to handle the actual endpoints
mux := http.NewServeMux()
mux.Handle("/v1/sys/init", handleSysInit(core))
@ -108,7 +113,7 @@ func Handler(core *vault.Core) http.Handler {
// Wrap the help wrapped handler with another layer with a generic
// handler
genericWrappedHandler := wrapGenericHandler(corsWrappedHandler)
genericWrappedHandler := wrapGenericHandler(corsWrappedHandler, props.MaxRequestSize)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
@ -120,12 +125,20 @@ func Handler(core *vault.Core) http.Handler {
// wrapGenericHandler wraps the handler with an extra layer that performs
// tasks common to all requests and responses.
func wrapGenericHandler(h http.Handler) http.Handler {
func wrapGenericHandler(h http.Handler, maxRequestSize int64) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Set the Cache-Control header for all the responses returned
// by Vault
w.Header().Set("Cache-Control", "no-store")
h.ServeHTTP(w, r)
// Add a context and put the request limit for this handler in it
if maxRequestSize > 0 {
ctx := context.WithValue(r.Context(), "max_request_size", maxRequestSize)
h.ServeHTTP(w, r.WithContext(ctx))
} else {
h.ServeHTTP(w, r)
}
return
})
}
@ -326,8 +339,19 @@ func (fs *UIAssetWrapper) Open(name string) (http.File, error) {
func parseRequest(r *http.Request, w http.ResponseWriter, out interface{}) error {
// Limit the maximum number of bytes to the configured maximum request size
// to protect against an indefinite amount of data being read.
limit := http.MaxBytesReader(w, r.Body, MaxRequestSize)
err := jsonutil.DecodeJSONFromReader(limit, out)
reader := r.Body
ctx := r.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return errors.New("could not parse max_request_size from request context")
}
if max > 0 {
reader = http.MaxBytesReader(w, r.Body, max)
}
}
err := jsonutil.DecodeJSONFromReader(reader, out)
if err != nil && err != io.EOF {
return errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
}
@ -422,13 +446,20 @@ func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
_, redirectAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve 503
err = errors.New("node is not active")
respondError(w, http.StatusServiceUnavailable, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
// If there is no leader, generate a 503 error
if redirectAddr == "" {
err = fmt.Errorf("no active Vault instance found")
err = errors.New("no active Vault instance found")
respondError(w, http.StatusServiceUnavailable, err)
return
}

View File

@ -261,7 +261,7 @@ func TestLogical_RequestSizeLimit(t *testing.T) {
// Write a very large object, should fail
resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
"data": make([]byte, MaxRequestSize),
"data": make([]byte, DefaultMaxRequestSize),
})
testResponseStatus(t, resp, 413)
}

View File

@ -30,7 +30,10 @@ func TestServerWithListener(tb testing.TB, ln net.Listener, addr string, core *v
// for tests.
mux := http.NewServeMux()
mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth))
mux.Handle("/", Handler(core))
mux.Handle("/", Handler(&vault.HandlerProperties{
Core: core,
MaxRequestSize: DefaultMaxRequestSize,
}))
server := &http.Server{
Addr: ln.Addr().String(),

View File

@ -24,7 +24,7 @@ export default DS.RESTAdapter.extend({
},
_preRequest(url, options) {
const token = this.get('auth.currentToken');
const token = options.clientToken || this.get('auth.currentToken');
if (token && !options.unauthenticated) {
options.headers = Ember.assign(options.headers || {}, {
'X-Vault-Token': token,

View File

@ -13,7 +13,22 @@ export default ApplicationAdapter.extend({
return 'mounts/auth';
},
findAll() {
findAll(store, type, sinceToken, snapshotRecordArray) {
let isUnauthenticated = Ember.get(snapshotRecordArray || {}, 'adapterOptions.unauthenticated');
if (isUnauthenticated) {
let url = `/${this.urlPrefix()}/internal/ui/mounts`;
return this.ajax(url, 'GET', {
unauthenticated: true,
})
.then(result => {
return {
data: result.data.auth,
};
})
.catch(() => {
return [];
});
}
return this.ajax(this.url(), 'GET').catch(e => {
if (e instanceof DS.AdapterError) {
Ember.set(e, 'policyPath', 'sys/auth');

View File

@ -15,9 +15,9 @@ export default ApplicationAdapter.extend({
},
toolAction(action, data, options = {}) {
const { wrapTTL } = options;
const { wrapTTL, clientToken } = options;
const url = this.toolUrlFor(action);
const ajaxOptions = wrapTTL ? { data, wrapTTL } : { data };
const ajaxOptions = wrapTTL ? { data, wrapTTL, clientToken } : { data, clientToken };
return this.ajax(url, 'POST', ajaxOptions);
},
});

View File

@ -1,5 +1,6 @@
import Ember from 'ember';
import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends';
import { task } from 'ember-concurrency';
const BACKENDS = supportedAuthBackends();
const { computed, inject, get } = Ember;
@ -11,57 +12,125 @@ const DEFAULTS = {
export default Ember.Component.extend(DEFAULTS, {
classNames: ['auth-form'],
routing: inject.service('-routing'),
router: inject.service(),
auth: inject.service(),
flashMessages: inject.service(),
store: inject.service(),
csp: inject.service('csp-event'),
// set during init and potentially passed in via a query param
selectedAuth: null,
methods: null,
cluster: null,
redirectTo: null,
didRender() {
this._super(...arguments);
// on very narrow viewports the active tab may be overflowed, so we scroll it into view here
this.$('li.is-active').get(0).scrollIntoView();
let activeEle = this.element.querySelector('li.is-active');
if (activeEle) {
activeEle.scrollIntoView();
}
// this is here because we're changing the `with` attr and there's no way to short-circuit rendering,
// so we'll just nav -> get new attrs -> re-render
if (!this.get('selectedAuth') || (this.get('selectedAuth') && !this.get('selectedAuthBackend'))) {
this.get('router').replaceWith('vault.cluster.auth', this.get('cluster.name'), {
queryParams: {
with: this.firstMethod(),
wrappedToken: this.get('wrappedToken'),
},
});
}
},
firstMethod() {
let firstMethod = this.get('methodsToShow.firstObject');
// prefer backends with a path over those with a type
return get(firstMethod, 'path') || get(firstMethod, 'type');
},
didReceiveAttrs() {
this._super(...arguments);
let newMethod = this.get('selectedAuthType');
let oldMethod = this.get('oldSelectedAuthType');
let token = this.get('wrappedToken');
let newMethod = this.get('selectedAuth');
let oldMethod = this.get('oldSelectedAuth');
if (oldMethod && oldMethod !== newMethod) {
this.resetDefaults();
}
this.set('oldSelectedAuthType', newMethod);
this.set('oldSelectedAuth', newMethod);
if (token) {
this.get('unwrapToken').perform(token);
}
},
resetDefaults() {
this.setProperties(DEFAULTS);
},
cluster: null,
redirectTo: null,
selectedAuthIsPath: computed.match('selectedAuth', /\/$/),
selectedAuthBackend: Ember.computed(
'allSupportedMethods',
'selectedAuth',
'selectedAuthIsPath',
function() {
let methods = this.get('allSupportedMethods');
let keyIsPath = this.get('selectedAuthIsPath');
let findKey = keyIsPath ? 'path' : 'type';
return methods.findBy(findKey, this.get('selectedAuth'));
}
),
selectedAuthType: 'token',
selectedAuthBackend: Ember.computed('selectedAuthType', function() {
return BACKENDS.findBy('type', this.get('selectedAuthType'));
}),
providerPartialName: Ember.computed('selectedAuthType', function() {
const type = Ember.String.dasherize(this.get('selectedAuthType'));
return `partials/auth-form/${type}`;
providerPartialName: computed('selectedAuthBackend', function() {
let type = this.get('selectedAuthBackend.type') || 'token';
type = type.toLowerCase();
let templateName = Ember.String.dasherize(type);
return `partials/auth-form/${templateName}`;
}),
hasCSPError: computed.alias('csp.connectionViolations.firstObject'),
cspErrorText: `This is a standby Vault node but can't communicate with the active node via request forwarding. Sign in at the active node to use the Vault UI.`,
allSupportedMethods: computed('methodsToShow', 'hasMethodsWithPath', function() {
let hasMethodsWithPath = this.get('hasMethodsWithPath');
let methodsToShow = this.get('methodsToShow');
return hasMethodsWithPath ? methodsToShow.concat(BACKENDS) : methodsToShow;
}),
hasMethodsWithPath: computed('methodsToShow', function() {
return this.get('methodsToShow').isAny('path');
}),
methodsToShow: computed('methods', 'methods.[]', function() {
let methods = this.get('methods') || [];
let shownMethods = methods.filter(m =>
BACKENDS.find(b => get(b, 'type').toLowerCase() === get(m, 'type').toLowerCase())
);
return shownMethods.length ? shownMethods : BACKENDS;
}),
unwrapToken: task(function*(token) {
// will be using the token auth method, so set it here
this.set('selectedAuth', 'token');
let adapter = this.get('store').adapterFor('tools');
try {
let response = yield adapter.toolAction('unwrap', null, { clientToken: token });
this.set('token', response.auth.client_token);
this.send('doSubmit');
} catch (e) {
this.set('error', `Token unwrap failed: ${e.errors[0]}`);
}
}),
handleError(e) {
this.set('loading', false);
let errors = e.errors.map(error => {
if (error.detail) {
return error.detail;
}
return error;
});
this.set('error', `Authentication failed: ${errors.join('.')}`);
},
@ -73,19 +142,22 @@ export default Ember.Component.extend(DEFAULTS, {
error: null,
});
let targetRoute = this.get('redirectTo') || 'vault.cluster';
let backend = this.get('selectedAuthBackend');
let path = this.get('customPath');
let attributes = get(backend, 'formAttributes');
let backend = this.get('selectedAuthBackend') || {};
let path = get(backend, 'path') || this.get('customPath');
let backendMeta = BACKENDS.find(
b => get(b, 'type').toLowerCase() === get(backend, 'type').toLowerCase()
);
let attributes = get(backendMeta, 'formAttributes');
data = Ember.assign(data, this.getProperties(...attributes));
if (this.get('useCustomPath') && path) {
if (get(backend, 'path') || (this.get('useCustomPath') && path)) {
data.path = path;
}
const clusterId = this.get('cluster.id');
this.get('auth').authenticate({ clusterId, backend: get(backend, 'type'), data }).then(
({ isRoot }) => {
this.set('loading', false);
const transition = this.get('routing.router').transitionTo(targetRoute);
const transition = this.get('router').transitionTo(targetRoute);
if (isRoot) {
transition.followRedirects().then(() => {
this.get('flashMessages').warning(

View File

@ -17,6 +17,7 @@ export default Ember.Component.extend({
isFullscreen: false,
console: inject.service(),
router: inject.service(),
store: inject.service(),
inputValue: null,
log: computed.alias('console.log'),
@ -86,6 +87,7 @@ export default Ember.Component.extend({
let route = owner.lookup(`route:${routeName}`);
try {
this.get('store').clearAllDatasets();
yield route.refresh();
this.logAndOutput(null, { type: 'success', content: 'The current screen has been refreshed!' });
} catch (error) {

View File

@ -97,6 +97,11 @@ export default Ember.Component.extend({
this.get('onChange')(path, value);
},
setAndBroadcastBool(path, trueVal, falseVal, value) {
let valueToSet = value === true ? trueVal : falseVal;
this.send('setAndBroadcast', path, valueToSet);
},
codemirrorUpdated(path, value, codemirror) {
codemirror.performLint();
const hasErrors = codemirror.state.lint.marked.length > 0;

View File

@ -0,0 +1,10 @@
import Ember from 'ember';
export default Ember.Controller.extend({
queryParams: [
{
wrappedToken: 'wrapped_token',
},
],
wrappedToken: '',
});

View File

@ -1,11 +1,9 @@
import Ember from 'ember';
import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends';
export default Ember.Controller.extend({
queryParams: ['with'],
with: Ember.computed(function() {
return supportedAuthBackends()[0].type;
}),
vaultController: Ember.inject.controller('vault'),
queryParams: [{ authMethod: 'with' }],
wrappedToken: Ember.computed.alias('vaultController.wrappedToken'),
authMethod: '',
redirectTo: null,
});

View File

@ -3,11 +3,11 @@ import Ember from 'ember';
const { Helper, inject } = Ember;
export default Helper.extend({
routing: inject.service('-routing'),
router: inject.service(),
compute([routeName, ...models], { replace = false }) {
return () => {
const router = this.get('routing.router');
const router = this.get('router');
const method = replace ? router.replaceWith : router.transitionTo;
return method.call(router, routeName, ...models);
};

View File

@ -44,7 +44,10 @@ export default DS.Model.extend({
}),
tuneAttrs: computed(function() {
return expandAttributeMeta(this, ['description', 'config.{defaultLeaseTtl,maxLeaseTtl}']);
return expandAttributeMeta(this, [
'description',
'config.{listingVisibility,defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}',
]);
}),
//sys/mounts/auth/[auth-path]/tune.
@ -61,12 +64,20 @@ export default DS.Model.extend({
'accessor',
'local',
'sealWrap',
'config.{defaultLeaseTtl,maxLeaseTtl}',
'config.{listingVisibility,defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}',
],
formFieldGroups: [
{ default: ['type', 'path'] },
{ 'Method Options': ['description', 'local', 'sealWrap', 'config.{defaultLeaseTtl,maxLeaseTtl}'] },
{
'Method Options': [
'description',
'config.listingVisibility',
'local',
'sealWrap',
'config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}',
],
},
],
attrs: computed('formFields', function() {

View File

@ -10,4 +10,25 @@ export default Fragment.extend({
label: 'Max Lease TTL',
editType: 'ttl',
}),
auditNonHmacRequestKeys: attr({
label: 'Request keys excluded from HMACing in audit',
editType: 'stringArray',
helpText: "Keys that will not be HMAC'd by audit devices in the request data object.",
}),
auditNonHmacResponseKeys: attr({
label: 'Response keys excluded from HMACing in audit',
editType: 'stringArray',
helpText: "Keys that will not be HMAC'd by audit devices in the response data object.",
}),
listingVisibility: attr('string', {
editType: 'boolean',
label: 'List method when unauthenticated',
trueValue: 'unauth',
falseValue: 'hidden',
}),
passthroughRequestHeaders: attr({
label: 'Allowed passthrough request headers',
helpText: 'Headers to whitelist and pass from the request to the backend',
editType: 'stringArray',
}),
});

View File

@ -1 +1,29 @@
export { default } from './cluster-route-base';
import ClusterRouteBase from './cluster-route-base';
import Ember from 'ember';
const { RSVP } = Ember;
export default ClusterRouteBase.extend({
beforeModel() {
return this.store.unloadAll('auth-method');
},
model() {
let cluster = this._super(...arguments);
return this.store
.findAll('auth-method', {
adapterOptions: {
unauthenticated: true,
},
})
.then(result => {
return RSVP.hash({
cluster,
methods: result,
});
});
},
resetController(controller) {
controller.set('wrappedToken', '');
controller.set('authMethod', '');
},
});

View File

@ -2,10 +2,7 @@ import ApplicationSerializer from './application';
export default ApplicationSerializer.extend({
normalizeBackend(path, backend) {
let struct = {};
for (let attribute in backend) {
struct[attribute] = backend[attribute];
}
let struct = { ...backend };
// strip the trailing slash off of the path so we
// can navigate to it without getting `//` in the url
struct.id = path.slice(0, -1);
@ -17,7 +14,7 @@ export default ApplicationSerializer.extend({
const isCreate = requestType === 'createRecord';
const backends = isCreate
? payload.data
: Object.keys(payload.data).map(id => this.normalizeBackend(id, payload[id]));
: Object.keys(payload.data).map(path => this.normalizeBackend(path, payload.data[path]));
return this._super(store, primaryModelClass, backends, id, requestType);
},

View File

@ -29,6 +29,7 @@
background: none;
color: inherit;
font-size: $body-size;
min-height: 2rem;
&:not(.console-ui-command):not(.CodeMirror-line) {
padding-left: $console-spacing;
@ -57,6 +58,13 @@
}
}
.console-ui-panel .hover-copy-button,
.console-ui-panel .hover-copy-button-static {
top: auto;
bottom: 0;
right: 0;
}
.console-ui-input {
align-items: center;
display: flex;
@ -82,25 +90,13 @@
}
.console-ui-output {
transition: background-color $speed;
transition: background-color $speed ease-in-out;
will-change: background-color;
padding-right: $size-2;
position: relative;
.console-ui-output-actions {
opacity: 0;
position: absolute;
right: 0;
top: 0;
transition: opacity $speed;
will-change: opacity;
}
background-color: rgba(#000, 0);
&:hover {
background: rgba($black, 0.25);
.console-ui-output-actions {
opacity: 1;
}
background-color: rgba(#000, 0.5);
}
}

View File

@ -1,12 +1,29 @@
<nav class="tabs sub-nav is-marginless">
<ul>
{{#each (supported-auth-backends) as |backend|}}
<li class="{{if (eq selectedAuthBackend.type backend.type) 'is-active' ''}}" data-test-auth-method>
<a href="{{href-to 'vault.cluster.auth' cluster.name (query-params with=backend.type)}}" data-test-auth-method-link={{backend.type}}>
{{capitalize backend.type}}
{{#each methodsToShow as |method|}}
{{#with (or method.path method.type) as |methodKey|}}
{{#if hasMethodsWithPath}}
<li class="{{if (and selectedAuthIsPath (eq (or selectedAuthBackend.path selectedAuthBackend.type) methodKey)) 'is-active' ''}}" data-test-auth-method>
<a href="{{href-to 'vault.cluster.auth' cluster.name (query-params with=methodKey)}}" data-test-auth-method-link={{method.type}}>
{{or method.id (capitalize method.type)}}
</a>
</li>
{{else}}
<li class="{{if (eq (or selectedAuthBackend.path selectedAuthBackend.type) methodKey) 'is-active' ''}}" data-test-auth-method>
<a href="{{href-to 'vault.cluster.auth' cluster.name (query-params with=methodKey)}}" data-test-auth-method-link={{method.type}}>
{{or method.id (capitalize method.type)}}
</a>
</li>
{{/if}}
{{/with}}
{{/each}}
{{#if hasMethodsWithPath}}
<li class="{{if (not selectedAuthIsPath) 'is-active' ''}}" data-test-auth-method>
<a href="{{href-to 'vault.cluster.auth' cluster.name (query-params with='token')}}" data-test-auth-method-link=other>
Other
</a>
</li>
{{/each}}
{{/if}}
</ul>
</nav>
<form
@ -19,8 +36,31 @@
{{else}}
{{message-error errorMessage=error data-test-auth-error=true}}
{{/if}}
{{#if (and hasMethodsWithPath (not selectedAuthIsPath))}}
<div class="field">
<label for="selectedMethod" class="is-label">
Method
</label>
<div class="control is-expanded" >
<div class="select is-fullwidth">
<select
name="selectedMethod"
id="selectedMethod"
onchange={{action (mut selectedAuth) value="target.value"}}
data-test-method-select
>
{{#each (supported-auth-backends) as |method|}}
<option selected={{eq selectedAuthBackend.type method.type}} value={{method.type}}>
{{capitalize method.type}}
</option>
{{/each}}
</select>
</div>
</div>
</div>
{{/if}}
{{partial providerPartialName}}
{{#unless (eq selectedAuthBackend.type "token")}}
{{#unless (or selectedAuthIsPath (eq selectedAuthBackend.type "token"))}}
<div class="box has-slim-padding is-shadowless">
{{toggle-button toggleTarget=this toggleAttr="useCustomPath"}}
<div class="field">

View File

@ -1,10 +1,13 @@
{{json-editor
value=(stringify content)
options=(hash
readOnly=true
lineNumbers=false
autoHeight=true
gutters=false
theme='hashi auto-height'
)
}}
<div class="console-ui-output has-copy-button">
{{json-editor
value=(stringify content)
options=(hash
readOnly=true
lineNumbers=false
autoHeight=true
gutters=false
theme='hashi auto-height'
)
}}
<HoverCopyButton @copyValue={{stringify content}} />
</div>

View File

@ -1,21 +1,8 @@
<div class="console-ui-output">
<div class="console-ui-output has-copy-button">
<pre>Keys
{{#each list as |item|}}
{{item}}
{{/each}}
</pre>
<div class="console-ui-output-actions">
{{#tool-tip renderInPlace=true as |d|}}
{{#d.trigger data-test-tool-tip-trigger=true}}
{{#copy-button clipboardText=(multi-line-join list) class="button is-compact"}}
{{i-con glyph="copy" aria-hidden="true" size=16}}
{{/copy-button}}
{{/d.trigger}}
{{#d.content class="tool-tip"}}
<div class="box">
Copy
</div>
{{/d.content}}
{{/tool-tip}}
</div>
<HoverCopyButton @copyValue={{multi-line-join list}} />
</div>

View File

@ -1,18 +1,4 @@
<div class="console-ui-output">
<pre>{{columns}}</pre>
<div class="console-ui-output-actions">
{{#tool-tip renderInPlace=true as |d|}}
{{#d.trigger data-test-tool-tip-trigger=true}}
{{#copy-button clipboardText=columns class="button is-compact"}}
{{i-con glyph="copy" aria-hidden="true" size=16}}
{{/copy-button}}
{{/d.trigger}}
{{#d.content class="tool-tip"}}
<div class="box">
Copy
</div>
{{/d.content}}
{{/tool-tip}}
</div>
<div class="console-ui-output has-copy-button">
<pre>{{columns}}</pre>
<HoverCopyButton @copyValue={{columns}} />
</div>

View File

@ -1 +1,4 @@
<pre>{{content}}</pre>
<div class="console-ui-output has-copy-button">
<pre>{{content}}</pre>
<HoverCopyButton @copyValue={{content}} @alwaysShow=true />
</div>

View File

@ -27,6 +27,25 @@
</select>
</div>
</div>
{{else if (and (eq attr.type 'string') (eq attr.options.editType 'boolean'))}}
<div class="b-checkbox">
<input type="checkbox"
id="{{attr.name}}"
class="styled"
checked={{eq (get model valuePath) attr.options.trueValue}}
onchange={{action (action "setAndBroadcastBool" valuePath attr.options.trueValue attr.options.falseValue) value="target.checked"}}
data-test-input={{attr.name}}
/>
<label for="{{attr.name}}" class="is-label">
{{labelString}}
{{#if attr.options.helpText}}
{{#info-tooltip}}
{{attr.options.helpText}}
{{/info-tooltip}}
{{/if}}
</label>
</div>
{{else if (eq attr.options.editType 'mountAccessor')}}
{{mount-accessor-select
name=attr.name

View File

@ -1,4 +1,4 @@
<ToolTip @renderInPlace={{true}} @onClose={{action (mut tooltipText) "Copy"}} as |T|>
<ToolTip @onClose={{action (mut tooltipText) "Copy"}} as |T|>
<T.trigger data-test-tooltip-trigger tabindex=false>
<CopyButton
data-test-hover-copy-button

View File

@ -1,4 +1,4 @@
{{#tool-tip renderInPlace=true as |d|}}
{{#tool-tip as |d|}}
{{#d.trigger tagName="button" type="button" class=(concat "tool-tip-trigger button") data-test-tool-tip-trigger=true}}
{{i-con
glyph="information-reversed"

View File

@ -13,11 +13,10 @@
</PageHeader>
{{#each (sort-by "path" model) as |method|}}
{{#linked-block
"vault.cluster.access.methods"
class="box is-sideless is-marginless has-pointer "
data-test-auth-backend-link=method.id
}}
<div
class="box is-sideless is-marginless"
data-test-auth-backend-link={{method.id}}
>
<div class="level is-mobile">
<div class="level-left">
<div>
@ -76,5 +75,5 @@
</div>
</div>
</div>
{{/linked-block}}
</div>
{{/each}}

View File

@ -1,4 +1,3 @@
{{!-- {{i-con glyph="unlocked" size=20}} {{capitalize model.name}} is {{if model.unsealed 'unsealed' 'sealed'}} --}}
<SplashPage as |Page|>
<Page.header>
<h1 class="title is-3">
@ -7,10 +6,12 @@
</Page.header>
<Page.content>
<AuthForm
@cluster={{model}}
@wrappedToken={{wrappedToken}}
@cluster={{model.cluster}}
@methods={{model.methods}}
@redirectTo={{redirectTo}}
@selectedAuthType={{with}}
/>
@selectedAuth={{authMethod}}
/>
</Page.content>
<Page.footer>
<div class="has-short-padding">

View File

@ -21,7 +21,7 @@ test('auth query params', function(assert) {
const backends = supportedAuthBackends();
visit('/vault/auth');
andThen(() => {
assert.equal(currentURL(), '/vault/auth');
assert.equal(currentURL(), '/vault/auth?with=token');
});
backends.reverse().forEach(backend => {
click(`[data-test-auth-method-link="${backend.type}"]`);
@ -38,7 +38,7 @@ test('auth query params', function(assert) {
test('it clears token when changing selected auth method', function(assert) {
visit('/vault/auth');
andThen(() => {
assert.equal(currentURL(), '/vault/auth');
assert.equal(currentURL(), '/vault/auth?with=token');
});
component.token('token').tabs.filterBy('name', 'GitHub')[0].link();
component.tabs.filterBy('name', 'Token')[0].link();

View File

@ -1,13 +1,15 @@
import { moduleForComponent, test } from 'ember-qunit';
import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends';
import Ember from 'ember';
import wait from 'ember-test-helpers/wait';
import hbs from 'htmlbars-inline-precompile';
import sinon from 'sinon';
import Pretender from 'pretender';
import { create } from 'ember-cli-page-object';
import authForm from '../../pages/components/auth-form';
const component = create(authForm);
const BACKENDS = supportedAuthBackends();
const authService = Ember.Service.extend({
authenticate() {
@ -15,11 +17,28 @@ const authService = Ember.Service.extend({
},
});
const workingAuthService = Ember.Service.extend({
authenticate() {
return Ember.RSVP.resolve({});
},
setLastFetch() {},
});
const routerService = Ember.Service.extend({
transitionTo() {
return Ember.RSVP.resolve();
},
replaceWith() {
return Ember.RSVP.resolve();
},
});
moduleForComponent('auth-form', 'Integration | Component | auth form', {
integration: true,
beforeEach() {
Ember.getOwner(this).lookup('service:csp-event').attach();
component.setContext(this);
this.register('service:router', routerService);
this.inject.service('router');
},
afterEach() {
@ -33,7 +52,8 @@ test('it renders error on CSP violation', function(assert) {
this.register('service:auth', authService);
this.inject.service('auth');
this.set('cluster', Ember.Object.create({ standby: true }));
this.render(hbs`{{auth-form cluster=cluster}}`);
this.set('selectedAuth', 'token');
this.render(hbs`{{auth-form cluster=cluster selectedAuth=selectedAuth}}`);
assert.equal(component.errorText, '');
component.login();
// because this is an ember-concurrency backed service,
@ -58,7 +78,8 @@ test('it renders with vault style errors', function(assert) {
});
this.set('cluster', Ember.Object.create({}));
this.render(hbs`{{auth-form cluster=cluster}}`);
this.set('selectedAuth', 'token');
this.render(hbs`{{auth-form cluster=cluster selectedAuth=selectedAuth}}`);
return component.login().then(() => {
assert.equal(component.errorText, 'Error Authentication failed: Not allowed');
server.shutdown();
@ -73,9 +94,113 @@ test('it renders AdapterError style errors', function(assert) {
});
this.set('cluster', Ember.Object.create({}));
this.render(hbs`{{auth-form cluster=cluster}}`);
this.set('selectedAuth', 'token');
this.render(hbs`{{auth-form cluster=cluster selectedAuth=selectedAuth}}`);
return component.login().then(() => {
assert.equal(component.errorText, 'Error Authentication failed: Bad Request');
server.shutdown();
});
});
test('it renders all the supported tabs when no methods are passed', function(assert) {
this.render(hbs`{{auth-form cluster=cluster}}`);
assert.equal(component.tabs.length, BACKENDS.length, 'renders a tab for every backend');
});
test('it renders all the supported methods and Other tab when methods are present', function(assert) {
let methods = [
{
type: 'userpass',
id: 'foo',
path: 'foo/',
},
{
type: 'approle',
id: 'approle',
path: 'approle/',
},
];
this.set('methods', methods);
this.render(hbs`{{auth-form cluster=cluster methods=methods}}`);
assert.equal(component.tabs.length, 2, 'renders a tab for userpass and Other');
assert.equal(component.tabs.objectAt(0).name, 'foo', 'uses the path in the label');
assert.equal(component.tabs.objectAt(1).name, 'Other', 'second tab is the Other tab');
});
test('it renders all the supported methods when no supported methods are present in passed methods', function(
assert
) {
let methods = [
{
type: 'approle',
id: 'approle',
path: 'approle/',
},
];
this.set('methods', methods);
this.render(hbs`{{auth-form cluster=cluster methods=methods}}`);
assert.equal(component.tabs.length, BACKENDS.length, 'renders a tab for every backend');
});
test('it makes a request to unwrap if passed a wrappedToken and logs in', function(assert) {
this.register('service:auth', workingAuthService);
this.inject.service('auth');
let authSpy = sinon.spy(this.get('auth'), 'authenticate');
let server = new Pretender(function() {
this.post('/v1/sys/wrapping/unwrap', () => {
return [
200,
{ 'Content-Type': 'application/json' },
JSON.stringify({
auth: {
client_token: '12345',
},
}),
];
});
});
let wrappedToken = '54321';
this.set('wrappedToken', wrappedToken);
this.render(hbs`{{auth-form cluster=cluster wrappedToken=wrappedToken}}`);
Ember.run.later(() => Ember.run.cancelTimers(), 50);
return wait().then(() => {
assert.equal(server.handledRequests[0].url, '/v1/sys/wrapping/unwrap', 'makes call to unwrap the token');
assert.equal(
server.handledRequests[0].requestHeaders['X-Vault-Token'],
wrappedToken,
'uses passed wrapped token for the unwrap'
);
assert.ok(authSpy.calledOnce, 'a call to authenticate was made');
server.shutdown();
authSpy.restore();
});
});
test('it shows an error if unwrap errors', function(assert) {
let server = new Pretender(function() {
this.post('/v1/sys/wrapping/unwrap', () => {
return [
400,
{ 'Content-Type': 'application/json' },
JSON.stringify({
errors: ['There was an error unwrapping!'],
}),
];
});
});
this.set('wrappedToken', '54321');
this.render(hbs`{{auth-form cluster=cluster wrappedToken=wrappedToken}}`);
Ember.run.later(() => Ember.run.cancelTimers(), 50);
return wait().then(() => {
assert.equal(
component.errorText,
'Error Token unwrap failed: There was an error unwrapping!',
'shows the error'
);
server.shutdown();
});
});

View File

@ -14,27 +14,28 @@ test('wrapping api urls', function(assert) {
},
});
let clientToken;
let data = { foo: 'bar' };
adapter.toolAction('wrap', data, { wrapTTL: '30m' });
assert.equal('/v1/sys/wrapping/wrap', url, 'wrapping:wrap url OK');
assert.equal('POST', method, 'wrapping:wrap method OK');
assert.deepEqual({ data: data, wrapTTL: '30m' }, options, 'wrapping:wrap options OK');
assert.deepEqual({ data: data, wrapTTL: '30m', clientToken }, options, 'wrapping:wrap options OK');
data = { token: 'token' };
adapter.toolAction('lookup', data);
assert.equal('/v1/sys/wrapping/lookup', url, 'wrapping:lookup url OK');
assert.equal('POST', method, 'wrapping:lookup method OK');
assert.deepEqual({ data }, options, 'wrapping:lookup options OK');
assert.deepEqual({ data, clientToken }, options, 'wrapping:lookup options OK');
adapter.toolAction('unwrap', data);
assert.equal('/v1/sys/wrapping/unwrap', url, 'wrapping:unwrap url OK');
assert.equal('POST', method, 'wrapping:unwrap method OK');
assert.deepEqual({ data }, options, 'wrapping:unwrap options OK');
assert.deepEqual({ data, clientToken }, options, 'wrapping:unwrap options OK');
adapter.toolAction('rewrap', data);
assert.equal('/v1/sys/wrapping/rewrap', url, 'wrapping:rewrap url OK');
assert.equal('POST', method, 'wrapping:rewrap method OK');
assert.deepEqual({ data }, options, 'wrapping:rewrap options OK');
assert.deepEqual({ data, clientToken }, options, 'wrapping:rewrap options OK');
});
test('tools api urls', function(assert) {

View File

@ -38,14 +38,17 @@ func mockBackendExpiration(t testing.TB, backend physical.Backend) (*Core, *Expi
func TestExpiration_Tidy(t *testing.T) {
var err error
exp := mockExpiration(t)
// We use this later for tidy testing where we need to check the output
logOut := new(bytes.Buffer)
logger := log.New(&log.LoggerOptions{
Output: logOut,
})
exp.logger = logger
testCore := TestCore(t)
testCore.logger = logger
testCoreUnsealed(t, testCore)
exp := testCore.expiration
if err := exp.Restore(nil); err != nil {
t.Fatal(err)

View File

@ -26,6 +26,13 @@ const (
replTimeout = 10 * time.Second
)
// HandlerProperties is used to seed configuration into a vaulthttp.Handler.
// It's in this package to avoid a circular dependency.
type HandlerProperties struct {
Core *Core
MaxRequestSize int64
}
// fetchEntityAndDerivedPolicies returns the entity object for the given entity
// ID. If the entity is merged into a different entity object, the entity into
// which the given entity ID is merged into will be returned. This function

View File

@ -880,7 +880,7 @@ type TestClusterCore struct {
type TestClusterOptions struct {
KeepStandbysSealed bool
SkipInit bool
HandlerFunc func(*Core) http.Handler
HandlerFunc func(*HandlerProperties) http.Handler
BaseListenAddress string
NumCores int
SealFunc func() Seal
@ -1249,7 +1249,9 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
}
cores = append(cores, c)
if opts != nil && opts.HandlerFunc != nil {
handlers[i] = opts.HandlerFunc(c)
handlers[i] = opts.HandlerFunc(&HandlerProperties{
Core: c,
})
servers[i].Handler = handlers[i]
}
}
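With the signature change, vaulthttp.Handler itself now satisfies HandlerFunc, so test callers can pass it directly. A minimal sketch, assuming an external test package and the usual TestCluster Start/Cleanup flow:

```go
package vaultcluster_test

import (
	"testing"

	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/vault"
)

func TestWithCluster(t *testing.T) {
	cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
		// Handler now takes *vault.HandlerProperties, matching this field's type.
		HandlerFunc: vaulthttp.Handler,
	})
	cluster.Start()
	defer cluster.Cleanup()

	// ... exercise cluster.Cores[0].Client against the running cluster ...
}
```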

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -64,7 +64,7 @@ var (
)
var (
metaClient = &http.Client{
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
@ -72,15 +72,15 @@ var (
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}
subscribeClient = &http.Client{
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}
}}
)
// NotDefinedError is returned when requested metadata is not defined.
@ -95,74 +95,16 @@ func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
v, err = cl.getTrimmed(c.k)
} else {
v, err = Get(c.k)
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
@ -201,7 +143,7 @@ func testOnGCE() bool {
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
if err != nil {
resc <- false
return
@ -266,6 +208,255 @@ func systemInfoSuggestsGCE() bool {
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
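A minimal sketch of the new Client-based API: wrap a custom http.Client (here with a short timeout) and call the same accessors the package-level functions now delegate to. This only returns real values on GCE, or with GCE_METADATA_HOST pointed at a stand-in metadata server:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	c := metadata.NewClient(&http.Client{Timeout: 3 * time.Second})

	project, err := c.ProjectID()
	if err != nil {
		log.Fatal(err) // fails off-GCE unless GCE_METADATA_HOST is set
	}
	zone, err := c.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(project, zone)
}
```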
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
@ -275,11 +466,11 @@ func systemInfoSuggestsGCE() bool {
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
@ -295,7 +486,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
@ -310,128 +501,3 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
}
}
}
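// A minimal usage sketch (not part of the vendored file): watching a single
// metadata value with Client.Subscribe. The attribute name "my-flag" is
// illustrative only, and the caller is assumed to already hold a *Client.
func exampleSubscribe(c *Client) error {
	return c.Subscribe("instance/attributes/my-flag", func(v string, ok bool) error {
		if !ok {
			return fmt.Errorf("my-flag was deleted")
		}
		fmt.Printf("my-flag changed to %q\n", v)
		return nil // returning nil keeps the subscription alive
	})
}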
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance name.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -104,7 +104,15 @@ type Handle struct {
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// gRPC service that implements IAM as a mixin.
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: c}, resource)
}
// InternalNewHandleClient is for use by the Google Cloud Libraries only.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

83
vendor/cloud.google.com/go/internal/trace/go18.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
if err2, ok := err.(*googleapi.Error); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
} else {
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
}
}
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
switch httpStatusCode {
case 200:
return int32(code.Code_OK)
case 499:
return int32(code.Code_CANCELLED)
case 500:
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
case 400:
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
case 504:
return int32(code.Code_DEADLINE_EXCEEDED)
case 404:
return int32(code.Code_NOT_FOUND)
case 409:
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
case 403:
return int32(code.Code_PERMISSION_DENIED)
case 401:
return int32(code.Code_UNAUTHENTICATED)
case 429:
return int32(code.Code_RESOURCE_EXHAUSTED)
case 501:
return int32(code.Code_UNIMPLEMENTED)
case 503:
return int32(code.Code_UNAVAILABLE)
default:
return int32(code.Code_UNKNOWN)
}
}
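// A minimal usage sketch (not part of the vendored file): this is roughly how
// the client libraries are expected to wrap an operation with
// StartSpan/EndSpan. The span name and the op callback are illustrative only.
func traceCallSketch(ctx context.Context, op func(context.Context) error) (err error) {
	ctx = StartSpan(ctx, "cloud.google.com/go/example.Call")
	defer func() { EndSpan(ctx, err) }()
	return op(ctx)
}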

30
vendor/cloud.google.com/go/internal/trace/not_go18.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package trace
import (
"golang.org/x/net/context"
)
// OpenCensus only supports go 1.8 and higher.
func StartSpan(ctx context.Context, _ string) context.Context {
return ctx
}
func EndSpan(context.Context, error) {
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ import (
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20180118"
const Repo = "20180226"
// Go returns the Go runtime version. The returned string
// has no whitespace.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

345
vendor/cloud.google.com/go/spanner/batch.go generated vendored Normal file
View File

@ -0,0 +1,345 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spanner
import (
"bytes"
"encoding/gob"
"log"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
sppb "google.golang.org/genproto/googleapis/spanner/v1"
)
// BatchReadOnlyTransaction is a ReadOnlyTransaction that allows for exporting
// arbitrarily large amounts of data from Cloud Spanner databases.
// BatchReadOnlyTransaction partitions a read/query request. The request
// can then be executed independently over each partition while observing the
// same snapshot of the database. BatchReadOnlyTransaction can also be shared
// across multiple clients by passing around the BatchReadOnlyTransactionID and
// then recreating the transaction using Client.BatchReadOnlyTransactionFromID.
//
// Note: if a client is used only to run partitions, you can
// create it using a ClientConfig with both MinOpened and MaxIdle set to
// zero to avoid creating unnecessary sessions. You can also avoid excess
// gRPC channels by setting ClientConfig.NumChannels to the number of
// concurrently active BatchReadOnlyTransactions you expect to have.
type BatchReadOnlyTransaction struct {
ReadOnlyTransaction
ID BatchReadOnlyTransactionID
}
// BatchReadOnlyTransactionID is a unique identifier for a
// BatchReadOnlyTransaction. It can be used to re-create a
// BatchReadOnlyTransaction on a different machine or process by calling
// Client.BatchReadOnlyTransactionFromID.
type BatchReadOnlyTransactionID struct {
// unique ID for the transaction.
tid transactionID
// sid is the id of the Cloud Spanner session used for this transaction.
sid string
// rts is the read timestamp of this transaction.
rts time.Time
}
// Partition defines a segment of data to be read in a batch read or query. A
// partition can be serialized and processed across several different machines
// or processes.
type Partition struct {
pt []byte
qreq *sppb.ExecuteSqlRequest
rreq *sppb.ReadRequest
}
// PartitionOptions specifies options for a PartitionQueryRequest and
// PartitionReadRequest. See
// https://godoc.org/google.golang.org/genproto/googleapis/spanner/v1#PartitionOptions
// for more details.
type PartitionOptions struct {
// The desired data size for each partition generated.
PartitionBytes int64
// The desired maximum number of partitions to return.
MaxPartitions int64
}
// toProto converts a spanner.PartitionOptions into a sppb.PartitionOptions
func (opt PartitionOptions) toProto() *sppb.PartitionOptions {
return &sppb.PartitionOptions{
PartitionSizeBytes: opt.PartitionBytes,
MaxPartitions: opt.MaxPartitions,
}
}
// PartitionRead returns a list of Partitions that can be used to read rows from
// the database. These partitions can be executed across multiple processes,
// even across different machines. The partition size and count hints can be
// configured using PartitionOptions.
func (t *BatchReadOnlyTransaction) PartitionRead(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) {
return t.PartitionReadUsingIndex(ctx, table, "", keys, columns, opt)
}
// PartitionReadUsingIndex returns a list of Partitions that can be used to read
// rows from the database using an index.
func (t *BatchReadOnlyTransaction) PartitionReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) {
sh, ts, err := t.acquire(ctx)
if err != nil {
return nil, err
}
sid, client := sh.getID(), sh.getClient()
var (
kset *sppb.KeySet
resp *sppb.PartitionResponse
partitions []*Partition
)
kset, err = keys.keySetProto()
// request Partitions
if err != nil {
return nil, err
}
resp, err = client.PartitionRead(ctx, &sppb.PartitionReadRequest{
Session: sid,
Transaction: ts,
Table: table,
Index: index,
Columns: columns,
KeySet: kset,
PartitionOptions: opt.toProto(),
})
// prepare ReadRequest
req := &sppb.ReadRequest{
Session: sid,
Transaction: ts,
Table: table,
Index: index,
Columns: columns,
KeySet: kset,
}
// generate Partitions
for _, p := range resp.GetPartitions() {
partitions = append(partitions, &Partition{
pt: p.PartitionToken,
rreq: req,
})
}
return partitions, err
}
// PartitionQuery returns a list of Partitions that can be used to execute a query against the database.
func (t *BatchReadOnlyTransaction) PartitionQuery(ctx context.Context, statement Statement, opt PartitionOptions) ([]*Partition, error) {
sh, ts, err := t.acquire(ctx)
if err != nil {
return nil, err
}
sid, client := sh.getID(), sh.getClient()
var (
resp *sppb.PartitionResponse
partitions []*Partition
)
// request Partitions
req := &sppb.PartitionQueryRequest{
Session: sid,
Transaction: ts,
Sql: statement.SQL,
PartitionOptions: opt.toProto(),
}
if err := statement.bindParams(req); err != nil {
return nil, err
}
resp, err = client.PartitionQuery(ctx, req)
// prepare ExecuteSqlRequest
r := &sppb.ExecuteSqlRequest{
Session: sid,
Transaction: ts,
Sql: statement.SQL,
}
if err := statement.bindParams(r); err != nil {
return nil, err
}
// generate Partitions
for _, p := range resp.GetPartitions() {
partitions = append(partitions, &Partition{
pt: p.PartitionToken,
qreq: r,
})
}
return partitions, err
}
// release implements txReadEnv.release, noop.
func (t *BatchReadOnlyTransaction) release(err error) {
}
// setTimestamp implements txReadEnv.setTimestamp, noop.
// The read timestamp is fixed when the transaction is initialized, so we avoid contention by not writing to it again for each partition.
func (t *BatchReadOnlyTransaction) setTimestamp(ts time.Time) {
}
// Close marks the txn as closed.
func (t *BatchReadOnlyTransaction) Close() {
t.mu.Lock()
defer t.mu.Unlock()
t.state = txClosed
}
// Cleanup cleans up all the resources used by this transaction and makes
// it unusable. Once this method is invoked, the transaction is no longer
// usable anywhere, including other clients/processes with which this
// transaction was shared.
//
// Calling Cleanup is optional, but recommended. If Cleanup is not called, the
// transaction's resources will be freed when the session expires on the backend and
// is deleted. For more information about recycled sessions, see
// https://cloud.google.com/spanner/docs/sessions.
func (t *BatchReadOnlyTransaction) Cleanup(ctx context.Context) {
t.Close()
t.mu.Lock()
defer t.mu.Unlock()
sh := t.sh
if sh == nil {
return
}
t.sh = nil
sid, client := sh.getID(), sh.getClient()
err := runRetryable(ctx, func(ctx context.Context) error {
_, e := client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: sid})
return e
})
if err != nil {
log.Printf("Failed to delete session %v. Error: %v", sid, err)
}
}
// Execute runs a single Partition obtained from PartitionRead or PartitionQuery.
func (t *BatchReadOnlyTransaction) Execute(ctx context.Context, p *Partition) *RowIterator {
var (
sh *sessionHandle
err error
rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error)
)
if sh, _, err = t.acquire(ctx); err != nil {
return &RowIterator{err: err}
}
client := sh.getClient()
if client == nil {
// Might happen if the transaction is closed in the middle of an API call.
return &RowIterator{err: errSessionClosed(sh)}
}
// read or query partition
if p.rreq != nil {
p.rreq.PartitionToken = p.pt
rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
p.rreq.ResumeToken = resumeToken
return client.StreamingRead(ctx, p.rreq)
}
} else {
p.qreq.PartitionToken = p.pt
rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
p.qreq.ResumeToken = resumeToken
return client.ExecuteStreamingSql(ctx, p.qreq)
}
}
return stream(
contextWithOutgoingMetadata(ctx, sh.getMetadata()),
rpc,
t.setTimestamp,
t.release)
}
// MarshalBinary implements BinaryMarshaler.
func (tid BatchReadOnlyTransactionID) MarshalBinary() (data []byte, err error) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(tid.tid); err != nil {
return nil, err
}
if err := enc.Encode(tid.sid); err != nil {
return nil, err
}
if err := enc.Encode(tid.rts); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements BinaryUnmarshaler.
func (tid *BatchReadOnlyTransactionID) UnmarshalBinary(data []byte) error {
dec := gob.NewDecoder(bytes.NewReader(data))
if err := dec.Decode(&tid.tid); err != nil {
return err
}
if err := dec.Decode(&tid.sid); err != nil {
return err
}
return dec.Decode(&tid.rts)
}
// MarshalBinary implements BinaryMarshaler.
func (p Partition) MarshalBinary() (data []byte, err error) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(p.pt); err != nil {
return nil, err
}
var isReadPartition bool
var req proto.Message
if p.rreq != nil {
isReadPartition = true
req = p.rreq
} else {
isReadPartition = false
req = p.qreq
}
if err := enc.Encode(isReadPartition); err != nil {
return nil, err
}
if data, err = proto.Marshal(req); err != nil {
return nil, err
}
if err := enc.Encode(data); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements BinaryUnmarshaler.
func (p *Partition) UnmarshalBinary(data []byte) error {
var (
isReadPartition bool
d []byte
err error
)
dec := gob.NewDecoder(bytes.NewReader(data))
if err := dec.Decode(&p.pt); err != nil {
return err
}
if err := dec.Decode(&isReadPartition); err != nil {
return err
}
if err := dec.Decode(&d); err != nil {
return err
}
if isReadPartition {
p.rreq = &sppb.ReadRequest{}
err = proto.Unmarshal(d, p.rreq)
} else {
p.qreq = &sppb.ExecuteSqlRequest{}
err = proto.Unmarshal(d, p.qreq)
}
return err
}
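// A minimal sketch (not part of the vendored file): Partition and
// BatchReadOnlyTransactionID can be serialized with the methods above and
// shipped to another process over any byte-oriented transport; the transport
// itself is elided here.
func roundTripPartitionSketch(p *Partition) (*Partition, error) {
	b, err := p.MarshalBinary()
	if err != nil {
		return nil, err
	}
	// ... send b to a worker process ...
	var out Partition
	if err := out.UnmarshalBinary(b); err != nil {
		return nil, err
	}
	return &out, nil
}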

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,6 +18,7 @@ package spanner
import (
"fmt"
"log"
"regexp"
"sync/atomic"
"time"
@ -33,7 +34,7 @@ import (
)
const (
prodAddr = "spanner.googleapis.com:443"
endpoint = "spanner.googleapis.com:443"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
@ -75,6 +76,8 @@ type Client struct {
// Metadata to be sent with each request.
md metadata.MD
idleSessions *sessionPool
// sessionLabels for the sessions created by this client.
sessionLabels map[string]string
}
// ClientConfig has configurations for the client.
@ -85,6 +88,9 @@ type ClientConfig struct {
co []option.ClientOption
// SessionPoolConfig is the configuration for session pool.
SessionPoolConfig
// SessionLabels for the sessions created by this client.
// See https://cloud.google.com/spanner/docs/reference/rpc/google.spanner.v1#session for more info.
SessionLabels map[string]string
}
// errDial returns error for dialing to Cloud Spanner.
@ -111,7 +117,7 @@ func NewClient(ctx context.Context, database string, opts ...option.ClientOption
// NewClientWithConfig creates a client to a database. A valid database name has the
// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (_ *Client, err error) {
func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (c *Client, err error) {
ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.NewClient")
defer func() { traceEndSpan(ctx, err) }()
@ -119,14 +125,20 @@ func NewClientWithConfig(ctx context.Context, database string, config ClientConf
if err := validDatabaseName(database); err != nil {
return nil, err
}
c := &Client{
c = &Client{
database: database,
md: metadata.Pairs(
resourcePrefixHeader, database,
xGoogHeaderKey, xGoogHeaderVal),
}
// Make a copy of labels.
c.sessionLabels = make(map[string]string)
for k, v := range config.SessionLabels {
c.sessionLabels[k] = v
}
// gRPC options
allOpts := []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithEndpoint(endpoint),
option.WithScopes(Scope),
option.WithGRPCDialOption(
grpc.WithDefaultCallOptions(
@ -135,13 +147,12 @@ func NewClientWithConfig(ctx context.Context, database string, config ClientConf
),
),
}
allOpts = append(allOpts, openCensusOptions()...)
allOpts = append(allOpts, opts...)
// Prepare gRPC channels.
if config.NumChannels == 0 {
config.NumChannels = numChannels
}
// Default MaxOpened sessions
// Default configs for session pool.
if config.MaxOpened == 0 {
config.MaxOpened = uint64(config.NumChannels * 100)
}
@ -161,6 +172,7 @@ func NewClientWithConfig(ctx context.Context, database string, config ClientConf
// TODO: support more loadbalancing options.
return c.rrNext(), nil
}
config.SessionPoolConfig.sessionLabels = c.sessionLabels
sp, err := newSessionPool(database, config.SessionPoolConfig, c.md)
if err != nil {
c.Close()
@ -219,6 +231,112 @@ func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction {
return t
}
// BatchReadOnlyTransaction returns a BatchReadOnlyTransaction that can be used
// for partitioned reads or queries from a snapshot of the database. This is
// useful in batch processing pipelines where one wants to divide the work of
// reading from the database across multiple machines.
//
// Note: This transaction does not use the underlying session pool but creates a
// new session each time, and the session is reused across clients.
//
// You should call Close() after the txn is no longer needed on the local
// client, and call Cleanup() when the txn is finished for all clients, to free
// the session.
func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound) (*BatchReadOnlyTransaction, error) {
var (
tx transactionID
rts time.Time
s *session
sh *sessionHandle
err error
)
defer func() {
if err != nil && sh != nil {
e := runRetryable(ctx, func(ctx context.Context) error {
_, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()})
return e
})
if e != nil {
log.Printf("Failed to delete session %v. Error: %v", s.getID(), e)
}
}
}()
// create session
sc := c.rrNext()
err = runRetryable(ctx, func(ctx context.Context) error {
sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: c.database, Session: &sppb.Session{Labels: c.sessionLabels}})
if e != nil {
return e
}
// If no error, construct the new session.
s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: c.md}
return nil
})
if err != nil {
return nil, err
}
sh = &sessionHandle{session: s}
// begin transaction
err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error {
res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{
Session: sh.getID(),
Options: &sppb.TransactionOptions{
Mode: &sppb.TransactionOptions_ReadOnly_{
ReadOnly: buildTransactionOptionsReadOnly(tb, true),
},
},
})
if e != nil {
return e
}
tx = res.Id
if res.ReadTimestamp != nil {
rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
}
return nil
})
if err != nil {
return nil, err
}
t := &BatchReadOnlyTransaction{
ReadOnlyTransaction: ReadOnlyTransaction{
tx: tx,
txReadyOrClosed: make(chan struct{}),
state: txActive,
sh: sh,
rts: rts,
},
ID: BatchReadOnlyTransactionID{
tid: tx,
sid: sh.getID(),
rts: rts,
},
}
t.txReadOnly.txReadEnv = t
return t, nil
}
// BatchReadOnlyTransactionFromID reconstructs a BatchReadOnlyTransaction from a BatchReadOnlyTransactionID.
func (c *Client) BatchReadOnlyTransactionFromID(tid BatchReadOnlyTransactionID) *BatchReadOnlyTransaction {
sc := c.rrNext()
s := &session{valid: true, client: sc, id: tid.sid, createTime: time.Now(), md: c.md}
sh := &sessionHandle{session: s}
t := &BatchReadOnlyTransaction{
ReadOnlyTransaction: ReadOnlyTransaction{
tx: tid.tid,
txReadyOrClosed: make(chan struct{}),
state: txActive,
sh: sh,
rts: tid.rts,
},
ID: tid,
}
t.txReadOnly.txReadEnv = t
return t
}
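// A minimal end-to-end sketch (not part of the vendored file): a coordinator
// partitions a query and hands the transaction ID plus one Partition to each
// worker; each worker rebuilds the transaction with
// BatchReadOnlyTransactionFromID and executes its partition. The SQL, table
// and column names are illustrative, and Cleanup should be invoked once after
// all workers have finished.
func coordinatorSketch(ctx context.Context, c *Client) (BatchReadOnlyTransactionID, []*Partition, error) {
	txn, err := c.BatchReadOnlyTransaction(ctx, StrongRead())
	if err != nil {
		return BatchReadOnlyTransactionID{}, nil, err
	}
	defer txn.Close()
	ps, err := txn.PartitionQuery(ctx, NewStatement("SELECT UserID, Name FROM Users"), PartitionOptions{})
	if err != nil {
		return BatchReadOnlyTransactionID{}, nil, err
	}
	return txn.ID, ps, nil
}

func workerSketch(ctx context.Context, c *Client, id BatchReadOnlyTransactionID, p *Partition) error {
	txn := c.BatchReadOnlyTransactionFromID(id)
	defer txn.Close()
	return txn.Execute(ctx, p).Do(func(r *Row) error {
		// Process one row of this partition here.
		return nil
	})
}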
type transactionInProgressKey struct{}
func checkNestedTxn(ctx context.Context) error {
@ -324,8 +442,7 @@ func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption)
}
if !ao.atLeastOnce {
return c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
t.BufferWrite(ms)
return nil
return t.BufferWrite(ms)
})
}
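// A minimal caller-side sketch (not part of the vendored file) of the two
// write paths touched above: a blind Apply, and a read-write transaction that
// buffers mutations explicitly via BufferWrite. Table and column names are
// illustrative only.
func applySketch(ctx context.Context, c *Client) error {
	// One-shot apply of mutations.
	if _, err := c.Apply(ctx, []*Mutation{
		Insert("Users", []string{"UserID", "Name"}, []interface{}{int64(1), "alice"}),
	}); err != nil {
		return err
	}
	// Read-modify-write: buffered mutations commit atomically with the transaction.
	_, err := c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
		return t.BufferWrite([]*Mutation{
			Update("Users", []string{"UserID", "Name"}, []interface{}{int64(1), "bob"}),
		})
	})
	return err
}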

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -24,6 +24,10 @@ Note: This package is in beta. Some backwards-incompatible changes may occur.
See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction
to Cloud Spanner and additional help on using this API.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
Creating a Client
To start working with this package, create a client that refers to the database
@ -192,7 +196,7 @@ For Cloud Spanner columns that may contain NULL, use one of the NullXXX types,
like NullString:
var ns spanner.NullString
if err =: row.Column(0, &ns); err != nil {
if err := row.Column(0, &ns); err != nil {
// TODO: Handle error.
}
if ns.Valid {
@ -307,10 +311,5 @@ Tracing
This client has been instrumented to use OpenCensus tracing (http://opencensus.io).
To enable tracing, see "Enabling Tracing for a Program" at
https://godoc.org/go.opencensus.io/trace. OpenCensus tracing requires Go 1.8 or higher.
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package spanner // import "cloud.google.com/go/spanner"

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

23
vendor/cloud.google.com/go/spanner/go17.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
package spanner
import "reflect"
func structTagLookup(tag reflect.StructTag, key string) (string, bool) {
return tag.Lookup(key)
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,19 +19,10 @@ package spanner
import (
"fmt"
ocgrpc "go.opencensus.io/plugin/grpc"
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
func openCensusOptions() []option.ClientOption {
return []option.ClientOption{
option.WithGRPCDialOption(grpc.WithStatsHandler(ocgrpc.NewClientStatsHandler())),
}
}
func traceStartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
@ -52,15 +43,15 @@ func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format str
var a trace.Attribute
switch v := v.(type) {
case string:
a = trace.StringAttribute{k, v}
a = trace.StringAttribute(k, v)
case bool:
a = trace.BoolAttribute{k, v}
a = trace.BoolAttribute(k, v)
case int:
a = trace.Int64Attribute{k, int64(v)}
a = trace.Int64Attribute(k, int64(v))
case int64:
a = trace.Int64Attribute{k, v}
a = trace.Int64Attribute(k, v)
default:
a = trace.StringAttribute{k, fmt.Sprintf("%#v", v)}
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
}
attrs = append(attrs, a)
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -30,13 +30,11 @@ import (
// A Key can be either a Cloud Spanner row's primary key or a secondary index key.
// It is essentially an interface{} array, which represents a set of Cloud Spanner
// columns. A Key type has the following usages:
// columns. A Key can be used as:
//
// - Used as primary key which uniquely identifies a Cloud Spanner row.
// - Used as secondary index key which maps to a set of Cloud Spanner rows
// indexed under it.
// - Used as endpoints of primary key/secondary index ranges,
// see also the KeyRange type.
// - A primary key which uniquely identifies a Cloud Spanner row.
// - A secondary index key which maps to a set of Cloud Spanner rows indexed under it.
// - An endpoint of primary key/secondary index ranges; see the KeyRange type.
//
// Rows that are identified by the Key type are outputs of read operation or targets of
// delete operation in a mutation. Note that for Insert/Update/InsertOrUpdate/Update
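// A minimal sketch (not part of the vendored file) of the Key usages listed
// above; the "Users" table, its columns and the context import are assumed
// for illustration only.
func keySketch(ctx context.Context, c *Client) error {
	// Primary-key lookup of a single row.
	if _, err := c.Single().ReadRow(ctx, "Users", Key{int64(1)}, []string{"Name"}); err != nil {
		return err
	}
	// Keys as range endpoints; a KeyRange can be passed where a KeySet is expected.
	kr := KeyRange{Start: Key{int64(1)}, End: Key{int64(100)}, Kind: ClosedOpen}
	return c.Single().Read(ctx, "Users", kr, []string{"Name"}).Do(func(r *Row) error {
		return nil // process each row in the range
	})
}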

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -53,23 +53,23 @@ const (
//
// Many mutations can be applied in a single atomic commit. For purposes of
// constraint checking (such as foreign key constraints), the operations can be
// viewed as applying in same order as the mutations are supplied in (so that
// e.g., a row and its logical "child" can be inserted in the same commit).
// viewed as applying in the same order as the mutations are provided (so that, e.g.,
// a row and its logical "child" can be inserted in the same commit).
//
// - The Apply function applies series of mutations.
// - A ReadWriteTransaction applies a series of mutations as part of an
// atomic read-modify-write operation.
// Example:
// The Apply function applies series of mutations. For example,
//
// m := spanner.Insert("User",
// []string{"user_id", "profile"},
// []interface{}{UserID, profile})
// _, err := client.Apply(ctx, []*spanner.Mutation{m})
// m := spanner.Insert("User",
// []string{"user_id", "profile"},
// []interface{}{UserID, profile})
// _, err := client.Apply(ctx, []*spanner.Mutation{m})
//
// In this example, we insert a new row into the User table. The primary key
// inserts a new row into the User table. The primary key
// for the new row is UserID (presuming that "user_id" has been declared as the
// primary key of the "User" table).
//
// To apply a series of mutations as part of an atomic read-modify-write operation,
// use ReadWriteTransaction.
//
// Updating a row
//
// Changing the values of columns in an existing row is very similar to

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

74
vendor/cloud.google.com/go/spanner/not_go17.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.7
package spanner
import (
"reflect"
"strconv"
)
func structTagLookup(tag reflect.StructTag, key string) (string, bool) {
// from go1.10.2 implementation of StructTag.Lookup.
for tag != "" {
// Skip leading space.
i := 0
for i < len(tag) && tag[i] == ' ' {
i++
}
tag = tag[i:]
if tag == "" {
break
}
// Scan to colon. A space, a quote or a control character is a syntax error.
// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
// as it is simpler to inspect the tag's bytes than the tag's runes.
i = 0
for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
i++
}
if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
break
}
name := string(tag[:i])
tag = tag[i+1:]
// Scan quoted string to find value.
i = 1
for i < len(tag) && tag[i] != '"' {
if tag[i] == '\\' {
i++
}
i++
}
if i >= len(tag) {
break
}
qvalue := string(tag[:i+1])
tag = tag[i+1:]
if key == name {
value, err := strconv.Unquote(qvalue)
if err != nil {
break
}
return value, true
}
}
return "", false
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -16,15 +16,10 @@
package spanner
import (
"golang.org/x/net/context"
"google.golang.org/api/option"
)
import "golang.org/x/net/context"
// OpenCensus only supports go 1.8 and higher.
func openCensusOptions() []option.ClientOption { return nil }
func traceStartSpan(ctx context.Context, _ string) context.Context {
return ctx
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -137,7 +137,7 @@ func (r *RowIterator) Do(f func(r *Row) error) error {
}
}
// Stop terminates the iteration. It should be called after every iteration.
// Stop terminates the iteration. It should be called after you finish using the iterator.
func (r *RowIterator) Stop() {
if r.streamd != nil {
defer traceEndSpan(r.streamd.ctx, r.err)
@ -255,7 +255,7 @@ type resumableStreamDecoder struct {
// ctx is the caller's context, used for cancel/timeout Next().
ctx context.Context
// rpc is a factory of streamingReceiver, which might resume
// a pervious stream from the point encoded in restartToken.
// a previous stream from the point encoded in restartToken.
// rpc is always a wrapper of a Cloud Spanner query which is
// resumable.
rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error)

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -273,12 +273,14 @@ func errToStructArgType(p interface{}) error {
// ToStruct fetches the columns in a row into the fields of a struct.
// The rules for mapping a row's columns into a struct's exported fields
// are as the following:
// 1. If a field has a `spanner: "column_name"` tag, then decode column
// 'column_name' into the field. A special case is the `spanner: "-"`
// tag, which instructs ToStruct to ignore the field during decoding.
// 2. Otherwise, if the name of a field matches the name of a column (ignoring case),
// decode the column into the field.
// are:
//
// 1. If a field has a `spanner: "column_name"` tag, then decode column
// 'column_name' into the field. A special case is the `spanner: "-"`
// tag, which instructs ToStruct to ignore the field during decoding.
//
// 2. Otherwise, if the name of a field matches the name of a column (ignoring case),
// decode the column into the field.
//
// The fields of the destination struct can be of any type that is acceptable
// to spanner.Row.Column.
@ -286,6 +288,10 @@ func errToStructArgType(p interface{}) error {
// Slice and pointer fields will be set to nil if the source column is NULL, and a
// non-nil value if the column is not NULL. To decode NULL values of other types, use
// one of the spanner.NullXXX types as the type of the destination field.
//
// If ToStruct returns an error, the contents of p are undefined. Some fields may
// have been successfully populated, while others were not; you should not use any of
// the fields.
func (r *Row) ToStruct(p interface{}) error {
// Check if p is a pointer to a struct
if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
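// A minimal sketch (not part of the vendored file) of the tag rules described
// above; the "UserID" and "Name" columns are illustrative only.
type userRowSketch struct {
	ID     int64  `spanner:"UserID"` // rule 1: explicit column tag
	Name   string                    // rule 2: matched to the "Name" column by name
	Ignore string `spanner:"-"`      // never populated by ToStruct
}

func toStructSketch(r *Row) (*userRowSketch, error) {
	var u userRowSketch
	if err := r.ToStruct(&u); err != nil {
		return nil, err
	}
	return &u, nil
}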

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -310,6 +310,8 @@ type SessionPoolConfig struct {
HealthCheckInterval time.Duration
// healthCheckSampleInterval is how often the health checker samples live session (for use in maintaining session pool size). Defaults to 1 min.
healthCheckSampleInterval time.Duration
// sessionLabels for the sessions created in the session pool.
sessionLabels map[string]string
}
// errNoRPCGetter returns error for SessionPoolConfig missing getRPCClient method.
@ -318,9 +320,9 @@ func errNoRPCGetter() error {
}
// errMinOpenedGTMaxOpened returns error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set.
func errMinOpenedGTMaxOpened(spc *SessionPoolConfig) error {
func errMinOpenedGTMaxOpened(maxOpened, minOpened uint64) error {
return spannerErrorf(codes.InvalidArgument,
"require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", spc.MaxOpened, spc.MinOpened)
"require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", maxOpened, minOpened)
}
// validate verifies that the SessionPoolConfig is good for use.
@ -329,7 +331,7 @@ func (spc *SessionPoolConfig) validate() error {
return errNoRPCGetter()
}
if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 {
return errMinOpenedGTMaxOpened(spc)
return errMinOpenedGTMaxOpened(spc.MaxOpened, spc.MinOpened)
}
return nil
}
@ -463,7 +465,10 @@ func (p *sessionPool) createSession(ctx context.Context) (*session, error) {
}
var s *session
err = runRetryable(ctx, func(ctx context.Context) error {
sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: p.db})
sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{
Database: p.db,
Session: &sppb.Session{Labels: p.sessionLabels},
})
if e != nil {
return e
}
@ -1067,7 +1072,7 @@ func shouldDropSession(err error) bool {
}
// If a Cloud Spanner can no longer locate the session (for example, if session is garbage collected), then caller
// should not try to return the session back into the session pool.
// TODO: once gRPC can return auxilary error information, stop parsing the error message.
// TODO: once gRPC can return auxiliary error information, stop parsing the error message.
if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") {
return true
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -66,12 +66,12 @@ var (
errNoType = errors.New("no type information")
)
// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest.
func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error {
r.Params = &proto3.Struct{
// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest or sppb.PartitionQueryRequest.
func (s *Statement) bindParams(i interface{}) error {
params := &proto3.Struct{
Fields: map[string]*proto3.Value{},
}
r.ParamTypes = map[string]*sppb.Type{}
paramTypes := map[string]*sppb.Type{}
for k, v := range s.Params {
if v == nil {
return errBindParam(k, v, errNilParam)
@ -83,8 +83,19 @@ func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error {
if t == nil { // should not happen, because of nil check above
return errBindParam(k, v, errNoType)
}
r.Params.Fields[k] = val
r.ParamTypes[k] = t
params.Fields[k] = val
paramTypes[k] = t
}
switch r := i.(type) {
default:
return fmt.Errorf("failed to bind query parameter, unexpected request type: %v", r)
case *sppb.ExecuteSqlRequest:
r.Params = params
r.ParamTypes = paramTypes
case *sppb.PartitionQueryRequest:
r.Params = params
r.ParamTypes = paramTypes
}
return nil
}
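// A minimal caller-side sketch (not part of the vendored file): the parameters
// set on a Statement are what bindParams copies into the outgoing
// ExecuteSqlRequest or PartitionQueryRequest. The SQL text and parameter name
// are illustrative only.
func statementSketch() Statement {
	stmt := NewStatement("SELECT Name FROM Users WHERE UserID = @id")
	stmt.Params["id"] = int64(42)
	return stmt
}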

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -39,11 +39,8 @@ const (
// TimestampBound defines how Cloud Spanner will choose a timestamp for a single
// read/query or read-only transaction.
//
// The types of timestamp bound are:
//
// - Strong (the default).
// - Bounded staleness.
// - Exact staleness.
// There are three types of timestamp bound: strong, bounded staleness and exact
// staleness. Strong is the default.
//
// If the Cloud Spanner database to be read is geographically distributed, stale
// read-only transactions can execute more quickly than strong or read-write
@ -57,7 +54,7 @@ const (
//
// Strong reads are guaranteed to see the effects of all transactions that have
// committed before the start of the read. Furthermore, all rows yielded by a
// single read are consistent with each other - if any part of the read
// single read are consistent with each other: if any part of the read
// observes a transaction, all parts of the read see the transaction.
//
// Strong reads are not repeatable: two consecutive strong read-only
@ -65,18 +62,17 @@ const (
// writes. If consistency across reads is required, the reads should be
// executed within a transaction or at an exact read timestamp.
//
// Use StrongRead() to create a bound of this type.
// Use StrongRead to create a bound of this type.
//
// Exact staleness
//
// These timestamp bounds execute reads at a user-specified timestamp. Reads at
// a timestamp are guaranteed to see a consistent prefix of the global
// transaction history: they observe modifications done by all transactions
// with a commit timestamp less than or equal to the read timestamp, and
// observe none of the modifications done by transactions with a larger commit
// timestamp. They will block until all conflicting transactions that may be
// assigned commit timestamps less than or equal to the read timestamp have
// finished.
// An exact staleness timestamp bound executes reads at a user-specified timestamp.
// Reads at a timestamp are guaranteed to see a consistent prefix of the global
// transaction history: they observe modifications done by all transactions with a
// commit timestamp less than or equal to the read timestamp, and observe none of the
// modifications done by transactions with a larger commit timestamp. They will block
// until all conflicting transactions that may be assigned commit timestamps less
// than or equal to the read timestamp have finished.
//
// The timestamp can either be expressed as an absolute Cloud Spanner commit
// timestamp or a staleness relative to the current time.
@ -86,7 +82,7 @@ const (
// concurrency modes. On the other hand, boundedly stale reads usually return
// fresher results.
//
// Use ReadTimestamp() and ExactStaleness() to create a bound of this type.
// Use ReadTimestamp and ExactStaleness to create a bound of this type.
//
// Bounded staleness
//
@ -95,17 +91,17 @@ const (
// the staleness bound that allows execution of the reads at the closest
// available replica without blocking.
//
// All rows yielded are consistent with each other -- if any part of the read
// All rows yielded are consistent with each other: if any part of the read
// observes a transaction, all parts of the read see the transaction. Boundedly
// stale reads are not repeatable: two stale reads, even if they use the same
// staleness bound, can execute at different timestamps and thus return
// inconsistent results.
//
// Boundedly stale reads execute in two phases: the first phase negotiates a
// Boundedly stale reads execute in two phases. The first phase negotiates a
// timestamp among all replicas needed to serve the read. In the second phase,
// reads are executed at the negotiated timestamp.
//
// As a result of the two phase execution, bounded staleness reads are usually
// As a result of this two-phase execution, bounded staleness reads are usually
// a little slower than comparable exact staleness reads. However, they are
// typically able to return fresher results, and are more likely to execute at
// the closest replica.
@ -114,7 +110,7 @@ const (
// will be read, it can only be used with single-use reads and single-use
// read-only transactions.
//
// Use MinReadTimestamp() and MaxStaleness() to create a bound of this type.
// Use MinReadTimestamp and MaxStaleness to create a bound of this type.
//
// Old read timestamps and garbage collection
//
@ -123,7 +119,7 @@ const (
// GC". By default, version GC reclaims versions after they are four hours
// old. Because of this, Cloud Spanner cannot perform reads at read timestamps more
// than four hours in the past. This restriction also applies to in-progress
// reads and/or SQL queries whose timestamp become too old while
// reads and/or SQL queries whose timestamps become too old while
// executing. Reads and SQL queries with too-old read timestamps fail with the
// error ErrorCode.FAILED_PRECONDITION.
type TimestampBound struct {
@ -174,7 +170,6 @@ func ReadTimestamp(t time.Time) TimestampBound {
}
}
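// A minimal sketch (not part of the vendored file) of the three bound types
// described above applied to single-use reads. The query is illustrative, the
// iterators are stopped without being consumed for brevity, and the
// surrounding package's Client, NewStatement and context import are assumed.
func timestampBoundSketch(ctx context.Context, c *Client) {
	stmt := NewStatement("SELECT 1")
	// Strong (the default): observes all previously committed transactions.
	c.Single().WithTimestampBound(StrongRead()).Query(ctx, stmt).Stop()
	// Exact staleness: reads at exactly now minus ten seconds.
	c.Single().WithTimestampBound(ExactStaleness(10 * time.Second)).Query(ctx, stmt).Stop()
	// Bounded staleness: at most fifteen seconds stale; single-use reads only.
	c.Single().WithTimestampBound(MaxStaleness(15 * time.Second)).Query(ctx, stmt).Stop()
}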
// String implements fmt.Stringer.
func (tb TimestampBound) String() string {
switch tb.mode {
case strong:

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -268,7 +268,7 @@ func errUnexpectedTxState(ts txState) error {
// applications do not need to worry about this in practice. See the
// documentation of TimestampBound for more details.
//
// A ReadOnlyTransaction consumes resources on the server until Close() is
// A ReadOnlyTransaction consumes resources on the server until Close is
// called.
type ReadOnlyTransaction struct {
// txReadOnly contains methods for performing transactional reads.
@ -329,7 +329,7 @@ func (t *ReadOnlyTransaction) begin(ctx context.Context) error {
}
t.mu.Unlock()
if err != nil && sh != nil {
// Got a valid session handle, but failed to initalize transaction on Cloud Spanner.
// Got a valid session handle, but failed to initialize transaction on Cloud Spanner.
if shouldDropSession(err) {
sh.destroy()
}
@ -623,7 +623,7 @@ type ReadWriteTransaction struct {
mu sync.Mutex
// state is the current transaction status of the read-write transaction.
state txState
// wb is the set of buffered mutations waiting to be commited.
// wb is the set of buffered mutations waiting to be committed.
wb []*Mutation
}
@ -720,7 +720,7 @@ func (t *ReadWriteTransaction) begin(ctx context.Context) error {
func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) {
var ts time.Time
t.mu.Lock()
t.state = txClosed // No futher operations after commit.
t.state = txClosed // No further operations after commit.
mPb, err := mutationsProto(t.wb)
t.mu.Unlock()
if err != nil {
@ -809,9 +809,9 @@ type writeOnlyTransaction struct {
sp *sessionPool
}
// applyAtLeastOnce commits a list of mutations to Cloud Spanner for at least once, unless one of the following happends:
// 1) Context is timeout.
// 2) An unretryable error(e.g. database not found) occurs.
// applyAtLeastOnce commits a list of mutations to Cloud Spanner at least once, unless one of the following happens:
// 1) Context times out.
// 2) An unretryable error (e.g. database not found) occurs.
// 3) There is a malformed Mutation object.
func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) {
var (

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -32,6 +32,19 @@ import (
"google.golang.org/grpc/codes"
)
const commitTimestampPlaceholderString = "spanner.commit_timestamp()"
var (
// CommitTimestamp is a special value used to tell Cloud Spanner
// to insert the commit timestamp of the transaction into a column.
// It can be used in a Mutation, or directly used in
// InsertStruct or InsertMap. See ExampleCommitTimestamp.
// This is just a placeholder and the actual value stored in this
// variable has no meaning.
CommitTimestamp time.Time = commitTimestamp
commitTimestamp = time.Unix(0, 0).In(time.FixedZone("CommitTimestamp placeholder", 0xDB))
)
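// A minimal usage sketch (not part of the vendored file): writing the commit
// timestamp into a TIMESTAMP column created with allow_commit_timestamp=true.
// The table and column names are illustrative only.
func commitTimestampSketch() *Mutation {
	return InsertMap("Events", map[string]interface{}{
		"EventID":   int64(7),
		"CreatedAt": CommitTimestamp,
	})
}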
// NullInt64 represents a Cloud Spanner INT64 that may be NULL.
type NullInt64 struct {
Int64 int64
@ -66,6 +79,25 @@ type NullFloat64 struct {
Valid bool // Valid is true if Float64 is not NULL.
}
// Cloud Spanner STRUCT (aka STRUCT) values (https://cloud.google.com/spanner/docs/data-types#struct-type)
// can be represented by a Go struct value.
// The spanner.StructType of such values is built from the field types and field tag information
// of the Go struct. If a field in the struct type definition has a "spanner:<field_name>" tag,
// then the value of the "spanner" key in the tag is used as the name for that field in the
// built spanner.StructType, otherwise the field name in the struct definition is used. To specify a
// field with an empty field name in a Cloud Spanner STRUCT type, use the `spanner:""` tag
// annotation against the corresponding field in the Go struct's type definition.
//
// A STRUCT value can contain STRUCT-typed and Array-of-STRUCT typed fields and these can be
// specified using named struct-typed and []struct-typed fields inside a Go struct. However,
// embedded struct fields are not allowed. Unexported struct fields are ignored.
//
// NULL STRUCT values in Cloud Spanner are typed. A nil pointer to a Go struct value can be used to
// specify a NULL STRUCT value of the corresponding spanner.StructType. Nil and empty slices of a
// Go STRUCT type can be used to specify NULL and empty array values respectively of the
// corresponding spanner.StructType. A slice of pointers to a Go struct type can be used to specify
// an array of NULL-able STRUCT values.
// String implements Stringer.String for NullFloat64
func (n NullFloat64) String() string {
if !n.Valid {
@ -183,6 +215,12 @@ func errNilArrElemType(t *sppb.Type) error {
return spannerErrorf(codes.FailedPrecondition, "array type %v is with nil array element type", t)
}
func errUnsupportedEmbeddedStructFields(fname string) error {
return spannerErrorf(codes.InvalidArgument, "Embedded field: %s. Embedded and anonymous fields are not allowed "+
"when converting Go structs to Cloud Spanner STRUCT values. To create a STRUCT value with an "+
"unnamed field, use a `spanner:\"\"` field tag.", fname)
}
// errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't
// support NULL values.
func errDstNotForNull(dst interface{}) error {
@ -1041,7 +1079,7 @@ func errNotStructElement(i int, v *proto3.Value) error {
}
// decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to
// the structual information given in sppb.StructType ty.
// the structural information given in sppb.StructType ty.
func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) {
if pb == nil {
return nil, errNilListValue("STRUCT")
@ -1101,7 +1139,7 @@ func errDecodeStructField(ty *sppb.StructType, f string, err error) error {
}
// decodeStruct decodes proto3.ListValue pb into struct referenced by pointer ptr, according to
// the structual information given in sppb.StructType ty.
// the structural information given in sppb.StructType ty.
func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
if reflect.ValueOf(ptr).IsNil() {
return errNilDst(ptr)
@ -1109,7 +1147,7 @@ func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) er
if ty == nil {
return errNilSpannerStructType()
}
// t holds the structual information of ptr.
// t holds the structural information of ptr.
t := reflect.TypeOf(ptr).Elem()
// v is the actual value that ptr points to.
v := reflect.ValueOf(ptr).Elem()
@ -1155,7 +1193,7 @@ func isPtrStructPtrSlice(t reflect.Type) bool {
}
// decodeStructArray decodes proto3.ListValue pb into struct slice referenced by pointer ptr, according to the
// structual information given in a sppb.StructType.
// structural information given in a sppb.StructType.
func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
if pb == nil {
return errNilListValue("STRUCT")
@ -1327,7 +1365,11 @@ func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) {
}
pt = listType(floatType())
case time.Time:
pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano))
if v == commitTimestamp {
pb.Kind = stringKind(commitTimestampPlaceholderString)
} else {
pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano))
}
pt = timeType()
case []time.Time:
if v != nil {
@ -1379,17 +1421,158 @@ func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) {
// transmission don't affect our encoded value.
pb = proto.Clone(v.Value).(*proto3.Value)
pt = proto.Clone(v.Type).(*sppb.Type)
default:
case []GenericColumnValue:
return nil, nil, errEncoderUnsupportedType(v)
default:
if !isStructOrArrayOfStructValue(v) {
return nil, nil, errEncoderUnsupportedType(v)
}
typ := reflect.TypeOf(v)
// Value is a Go struct value/ptr.
if (typ.Kind() == reflect.Struct) ||
(typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct) {
return encodeStruct(v)
}
// Value is a slice of Go struct values/ptrs.
if typ.Kind() == reflect.Slice {
return encodeStructArray(v)
}
}
return pb, pt, nil
}
// Encodes a Go struct value/ptr in v to the spanner Value and Type protos. v itself must
// be non-nil.
func encodeStruct(v interface{}) (*proto3.Value, *sppb.Type, error) {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
// Pointer to struct.
if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
typ = typ.Elem()
if val.IsNil() {
// nil pointer to struct, representing a NULL STRUCT value. Use a dummy value to
// get the type.
_, st, err := encodeStruct(reflect.Zero(typ).Interface())
if err != nil {
return nil, nil, err
}
return nullProto(), st, nil
}
val = val.Elem()
}
if typ.Kind() != reflect.Struct {
return nil, nil, errEncoderUnsupportedType(v)
}
stf := make([]*sppb.StructType_Field, 0, typ.NumField())
stv := make([]*proto3.Value, 0, typ.NumField())
for i := 0; i < typ.NumField(); i++ {
// If the field has a 'spanner' tag, use the value of that tag as the field name.
// This is used to build STRUCT types with unnamed/duplicate fields.
sf := typ.Field(i)
fval := val.Field(i)
// Embedded fields are not allowed.
if sf.Anonymous {
return nil, nil, errUnsupportedEmbeddedStructFields(sf.Name)
}
// Unexported fields are ignored.
if !fval.CanInterface() {
continue
}
fname, ok := structTagLookup(sf.Tag, "spanner")
if !ok {
fname = sf.Name
}
eval, etype, err := encodeValue(fval.Interface())
if err != nil {
return nil, nil, err
}
stf = append(stf, mkField(fname, etype))
stv = append(stv, eval)
}
return listProto(stv...), structType(stf...), nil
}
// Encodes a slice of Go struct values/ptrs in v to the spanner Value and Type protos. v itself
// must be non-nil.
func encodeStructArray(v interface{}) (*proto3.Value, *sppb.Type, error) {
etyp := reflect.TypeOf(v).Elem()
sliceval := reflect.ValueOf(v)
// Slice of pointers to structs.
if etyp.Kind() == reflect.Ptr {
etyp = etyp.Elem()
}
// Use a dummy struct value to get the element type
_, elemTyp, err := encodeStruct(reflect.Zero(etyp).Interface())
if err != nil {
return nil, nil, err
}
// nil slice represents a NULL array-of-struct.
if sliceval.IsNil() {
return nullProto(), listType(elemTyp), nil
}
values := make([]*proto3.Value, 0, sliceval.Len())
for i := 0; i < sliceval.Len(); i++ {
ev, _, err := encodeStruct(sliceval.Index(i).Interface())
if err != nil {
return nil, nil, err
}
values = append(values, ev)
}
return listProto(values...), listType(elemTyp), nil
}
func isStructOrArrayOfStructValue(v interface{}) bool {
typ := reflect.TypeOf(v)
if typ.Kind() == reflect.Slice {
typ = typ.Elem()
}
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
return typ.Kind() == reflect.Struct
}
func isSupportedMutationType(v interface{}) bool {
switch v.(type) {
case string, NullString, []string, []NullString,
[]byte, [][]byte,
int, []int, int64, []int64, NullInt64, []NullInt64,
bool, []bool, NullBool, []NullBool,
float64, []float64, NullFloat64, []NullFloat64,
time.Time, []time.Time, NullTime, []NullTime,
civil.Date, []civil.Date, NullDate, []NullDate,
GenericColumnValue:
return true
default:
return false
}
}
// encodeValueArray encodes a Value array into a proto3.ListValue.
func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) {
lv := &proto3.ListValue{}
lv.Values = make([]*proto3.Value, 0, len(vs))
for _, v := range vs {
if !isSupportedMutationType(v) {
return nil, errEncoderUnsupportedType(v)
}
pb, _, err := encodeValue(v)
if err != nil {
return nil, err

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -18,6 +18,7 @@ import (
"net/http"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
@ -63,7 +64,10 @@ type ACLHandle struct {
}
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectDelete(ctx, entity)
}
@ -74,7 +78,10 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
}
// Set sets the permission level for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectSet(ctx, entity, role, false)
}
@ -85,7 +92,10 @@ func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) err
}
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectList(ctx)
}

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -21,6 +21,7 @@ import (
"time"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@ -63,7 +64,10 @@ func (c *Client) Bucket(name string) *BucketHandle {
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
var bkt *raw.Bucket
if attrs != nil {
bkt = attrs.toRawBucket()
@ -82,7 +86,10 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
}
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) error {
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newDeleteCall()
if err != nil {
return err
@ -139,7 +146,10 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
}
// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newGetCall()
if err != nil {
return nil, err
@ -155,7 +165,7 @@ func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
if err != nil {
return nil, err
}
return newBucket(resp), nil
return newBucket(resp)
}
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
@ -170,7 +180,10 @@ func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
return req, nil
}
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newPatchCall(&uattrs)
if err != nil {
return nil, err
@ -180,7 +193,7 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
if err != nil {
return nil, err
}
return newBucket(rb), nil
return newBucket(rb)
}
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
@ -241,8 +254,24 @@ type BucketAttrs struct {
// a user project (see BucketHandle.UserProject), which will be billed
// for the operations.
RequesterPays bool
// Lifecycle is the lifecycle configuration for objects in the bucket.
Lifecycle Lifecycle
// Retention policy enforces a minimum retention time for all objects
// contained in the bucket. A RetentionPolicy of nil implies the bucket
// has no minimum data retention.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
CORS []CORS
// The encryption configuration used by default for newly inserted objects.
Encryption *BucketEncryption
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
@ -250,12 +279,37 @@ type Lifecycle struct {
Rules []LifecycleRule
}
// Retention policy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
// period will result in an error. An unlocked retention policy can be
// modified or removed from the bucket via the Update method. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
type RetentionPolicy struct {
// RetentionPeriod specifies the duration that objects need to be
// retained. Retention duration must be greater than zero and less than
// 100 years. Note that enforcement of retention periods less than a day
// is not guaranteed. Such periods should only be used for testing
// purposes.
RetentionPeriod time.Duration
// EffectiveTime is the time from which the policy was enforced and
// effective. This field is read-only.
EffectiveTime time.Time
}
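
A minimal sketch of attaching such a policy at bucket creation, assuming placeholder project and bucket names (and subject to the private-alpha caveat above):

package example

import (
	"context"
	"time"

	"cloud.google.com/go/storage"
)

// createBucketWithRetention creates a bucket whose objects must be retained
// for at least 24 hours. "my-project" and "my-bucket" are placeholders.
func createBucketWithRetention(ctx context.Context, client *storage.Client) error {
	return client.Bucket("my-bucket").Create(ctx, "my-project", &storage.BucketAttrs{
		RetentionPolicy: &storage.RetentionPolicy{
			RetentionPeriod: 24 * time.Hour,
		},
	})
}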
const (
// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
rfc3339Date = "2006-01-02"
// DeleteAction is a lifecycle action that deletes a live and/or archived
// objects. Takes precendence over SetStorageClass actions.
// objects. Takes precedence over SetStorageClass actions.
DeleteAction = "Delete"
// SetStorageClassAction changes the storage class of live and/or archived
@ -335,9 +389,13 @@ type LifecycleCondition struct {
NumNewerVersions int64
}
func newBucket(b *raw.Bucket) *BucketAttrs {
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil
return nil, nil
}
rp, err := toRetentionPolicy(b.RetentionPolicy)
if err != nil {
return nil, err
}
bucket := &BucketAttrs{
Name: b.Name,
@ -349,6 +407,9 @@ func newBucket(b *raw.Bucket) *BucketAttrs {
Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
}
acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl {
@ -366,7 +427,7 @@ func newBucket(b *raw.Bucket) *BucketAttrs {
}
}
bucket.DefaultObjectACL = objACL
return bucket
return bucket, nil
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
@ -411,16 +472,70 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
Labels: labels,
Billing: bb,
Lifecycle: toRawLifecycle(b.Lifecycle),
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
Cors: toRawCORS(b.CORS),
Encryption: b.Encryption.toRawBucketEncryption(),
}
}
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
// MaxAge is the value to return in the Access-Control-Max-Age
// header used in preflight responses.
MaxAge time.Duration
// Methods is the list of HTTP methods on which to include CORS response
// headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
// of methods, and means "any method".
Methods []string
// Origins is the list of Origins eligible to receive CORS response
// headers. Note: "*" is permitted in the list of origins, and means
// "any Origin".
Origins []string
// ResponseHeaders is the list of HTTP headers other than the simple
// response headers to give permission for the user-agent to share
// across domains.
ResponseHeaders []string
}
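
A minimal sketch of supplying this CORS configuration when creating a bucket, with placeholder origin, project, and bucket names:

package example

import (
	"context"
	"time"

	"cloud.google.com/go/storage"
)

// createBucketWithCORS creates a bucket that allows cross-origin GETs from a
// single placeholder origin.
func createBucketWithCORS(ctx context.Context, client *storage.Client) error {
	return client.Bucket("my-bucket").Create(ctx, "my-project", &storage.BucketAttrs{
		CORS: []storage.CORS{{
			MaxAge:          time.Hour,
			Methods:         []string{"GET"},
			Origins:         []string{"https://example.com"},
			ResponseHeaders: []string{"Content-Type"},
		}},
	})
}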
// BucketEncryption is a bucket's encryption configuration.
type BucketEncryption struct {
// A Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt
// objects inserted into this bucket, if no encryption method is specified.
// The key's location must be the same as the bucket's.
DefaultKMSKeyName string
}
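
A hedged sketch tying the encryption settings together: a bucket-wide default key via BucketEncryption, plus a per-object key via the Writer's KMSKeyName attribute added later in this change. The key name is a placeholder in the required projects/P/locations/L/keyRings/R/cryptoKeys/K form, and the bucket and object names are placeholders as well:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

const kmsKey = "projects/P/locations/L/keyRings/R/cryptoKeys/K" // placeholder

// createWithDefaultKey sets a bucket-wide default KMS key.
func createWithDefaultKey(ctx context.Context, client *storage.Client) error {
	return client.Bucket("my-bucket").Create(ctx, "my-project", &storage.BucketAttrs{
		Encryption: &storage.BucketEncryption{DefaultKMSKeyName: kmsKey},
	})
}

// writeWithObjectKey encrypts a single object with a specific KMS key instead.
// Per the writer change later in this diff, it must not be combined with a
// customer-supplied encryption key.
func writeWithObjectKey(ctx context.Context, client *storage.Client) error {
	w := client.Bucket("my-bucket").Object("obj").NewWriter(ctx)
	w.KMSKeyName = kmsKey
	if _, err := w.Write([]byte("hello")); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}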
type BucketAttrsToUpdate struct {
// VersioningEnabled, if set, updates whether the bucket uses versioning.
// If set, updates whether the bucket uses versioning.
VersioningEnabled optional.Bool
// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
// If set, updates whether the bucket is a Requester Pays bucket.
RequesterPays optional.Bool
// If set, updates the retention policy of the bucket. Using
// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// If set, replaces the CORS configuration with a new configuration.
// An empty (rather than nil) slice causes all CORS policies to be removed.
CORS []CORS
// If set, replaces the encryption configuration of the bucket. Using
// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
// configuration.
Encryption *BucketEncryption
// If set, replaces the lifecycle configuration of the bucket.
Lifecycle *Lifecycle
setLabels map[string]string
deleteLabels map[string]bool
}
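
A minimal sketch of the clearing semantics these fields describe, assuming a placeholder bucket: a zero RetentionPeriod deletes the retention policy, an empty non-nil CORS slice removes all CORS rules, and an empty DefaultKMSKeyName drops the default key:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// clearBucketPolicies removes the retention policy, all CORS entries, and the
// default KMS key from a bucket in a single Update call.
func clearBucketPolicies(ctx context.Context, client *storage.Client) (*storage.BucketAttrs, error) {
	return client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{
		RetentionPolicy: &storage.RetentionPolicy{RetentionPeriod: 0},
		CORS:            []storage.CORS{}, // empty, not nil: delete all CORS rules
		Encryption:      &storage.BucketEncryption{DefaultKMSKeyName: ""},
	})
}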
@ -445,6 +560,18 @@ func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
rb := &raw.Bucket{}
if ua.CORS != nil {
rb.Cors = toRawCORS(ua.CORS)
rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
}
if ua.RetentionPolicy != nil {
if ua.RetentionPolicy.RetentionPeriod == 0 {
rb.NullFields = append(rb.NullFields, "RetentionPolicy")
rb.RetentionPolicy = nil
} else {
rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy()
}
}
if ua.VersioningEnabled != nil {
rb.Versioning = &raw.BucketVersioning{
Enabled: optional.ToBool(ua.VersioningEnabled),
@ -457,6 +584,17 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
ForceSendFields: []string{"RequesterPays"},
}
}
if ua.Encryption != nil {
if ua.Encryption.DefaultKMSKeyName == "" {
rb.NullFields = append(rb.NullFields, "Encryption")
rb.Encryption = nil
} else {
rb.Encryption = ua.Encryption.toRawBucketEncryption()
}
}
if ua.Lifecycle != nil {
rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
}
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -521,6 +659,25 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
return &b2
}
// LockRetentionPolicy locks a bucket's retention policy until a previously-configured
// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less
// than a day, the retention policy is treated as a development configuration and locking
// will have no effect. The BucketHandle must have a metageneration condition that
// matches the bucket's metageneration. See BucketHandle.If.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
var metageneration int64
if b.conds != nil {
metageneration = b.conds.MetagenerationMatch
}
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
_, err := req.Context(ctx).Do()
return err
}
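
Because the call needs a metageneration precondition, a typical sequence (sketched here with a placeholder bucket name, under the same private-alpha caveat) reads the bucket attributes first and threads the metageneration through BucketHandle.If:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// lockPolicy permanently locks a bucket's retention policy. "my-bucket" is a
// placeholder bucket name.
func lockPolicy(ctx context.Context, client *storage.Client) error {
	bkt := client.Bucket("my-bucket")
	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		return err
	}
	cond := storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}
	return bkt.If(cond).LockRetentionPolicy(ctx)
}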
// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
@ -544,6 +701,55 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
return nil
}
func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
if rp == nil {
return nil
}
return &raw.BucketRetentionPolicy{
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
}
}
func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
if rp == nil {
return nil, nil
}
t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
if err != nil {
return nil, err
}
return &RetentionPolicy{
RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
EffectiveTime: t,
}, nil
}
func toRawCORS(c []CORS) []*raw.BucketCors {
var out []*raw.BucketCors
for _, v := range c {
out = append(out, &raw.BucketCors{
MaxAgeSeconds: int64(v.MaxAge / time.Second),
Method: v.Methods,
Origin: v.Origins,
ResponseHeader: v.ResponseHeaders,
})
}
return out
}
func toCORS(rc []*raw.BucketCors) []CORS {
var out []CORS
for _, v := range rc {
out = append(out, CORS{
MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second,
Methods: v.Method,
Origins: v.Origin,
ResponseHeaders: v.ResponseHeader,
})
}
return out
}
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
var rl raw.BucketLifecycle
if len(l.Rules) == 0 {
@ -614,6 +820,22 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
return l
}
func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
if e == nil {
return nil
}
return &raw.BucketEncryption{
DefaultKmsKeyName: e.DefaultKMSKeyName,
}
}
func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
if e == nil {
return nil
}
return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
@ -695,8 +917,6 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
return resp.NextPageToken, nil
}
// TODO(jbd): Add storage.buckets.update.
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
@ -742,7 +962,7 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
req := it.client.raw.Buckets.List(it.projectID)
setClientHeader(req.Header())
req.Projection("full")
@ -752,7 +972,6 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
var err error
err = runWithRetry(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
@ -761,7 +980,11 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
return "", err
}
for _, item := range resp.Items {
it.buckets = append(it.buckets, newBucket(item))
b, err := newBucket(item)
if err != nil {
return "", err
}
it.buckets = append(it.buckets, b)
}
return resp.NextPageToken, nil
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
@ -59,17 +60,32 @@ type Copier struct {
// ProgressFunc should return quickly without blocking.
ProgressFunc func(copiedBytes, totalBytes uint64)
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
// any.
//
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
// (via ObjectHandle.Key) on the destination object will result in an error when
// Run is called.
DestinationKMSKeyName string
dst, src *ObjectHandle
}
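
A hedged sketch of a copy that re-encrypts the destination with a Cloud KMS key; bucket, object, and key names are placeholders, and, per the field comment above, combining this with a customer-supplied key on the destination would make Run fail:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// copyWithKMS copies src to dst and asks GCS to encrypt the destination
// object with the given Cloud KMS key.
func copyWithKMS(ctx context.Context, client *storage.Client) (*storage.ObjectAttrs, error) {
	src := client.Bucket("src-bucket").Object("src-obj")
	dst := client.Bucket("dst-bucket").Object("dst-obj")
	c := dst.CopierFrom(src)
	c.DestinationKMSKeyName = "projects/P/locations/L/keyRings/R/cryptoKeys/K" // placeholder
	return c.Run(ctx)
}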
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.src.validate(); err != nil {
return nil, err
}
if err := c.dst.validate(); err != nil {
return nil, err
}
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
@ -96,6 +112,9 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
@ -149,7 +168,10 @@ type Composer struct {
}
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.dst.validate(); err != nil {
return nil, err
}
@ -191,7 +213,6 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,6 +19,9 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
@ -61,7 +64,7 @@ global across all projects.
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
@ -74,15 +77,16 @@ Attrs:
Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets. You can use the
standard Go io.Reader and io.Writer interfaces to read and write
object data:
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will overwrite whatever is there.
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
@ -157,10 +161,5 @@ SignedURL for details.
// TODO: Handle error.
}
fmt.Println(url)
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package storage // import "cloud.google.com/go/storage"

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -24,3 +24,7 @@ import (
func withContext(r *http.Request, ctx context.Context) *http.Request {
return r.WithContext(ctx)
}
func goHTTPUncompressed(res *http.Response) bool {
return res.Uncompressed
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -16,6 +16,7 @@ package storage
import (
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
@ -35,14 +36,16 @@ type iamClient struct {
userProject string
}
func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var rp *raw.Policy
var err error
err = runWithRetry(ctx, func() error {
rp, err = call.Context(ctx).Do()
return err
@ -53,7 +56,10 @@ func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, er
return iamFromStoragePolicy(rp), nil
}
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
defer func() { trace.EndSpan(ctx, err) }()
rp := iamToStoragePolicy(p)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(call.Header())
@ -66,14 +72,16 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) e
})
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse
var err error
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -24,3 +24,10 @@ func withContext(r *http.Request, _ interface{}) *http.Request {
// In Go 1.6 and below, ignore the context.
return r
}
// Go 1.6 doesn't have http.Response.Uncompressed, so we can't know whether the Go
// HTTP stack uncompressed a gzip file. As a good approximation, assume that
// the lack of a Content-Length header means that it did uncompress.
func goHTTPUncompressed(res *http.Response) bool {
return res.Header.Get("Content-Length") == ""
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,6 +19,7 @@ import (
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
@ -118,7 +119,10 @@ func toRawNotification(n *Notification) *raw.Notification {
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*Notification, error) {
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
if n.ID != "" {
return nil, errors.New("storage: AddNotification: ID must not be set")
}
@ -142,14 +146,16 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*N
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (map[string]*Notification, error) {
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.List(b.name)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var res *raw.Notifications
var err error
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
@ -169,7 +175,10 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
}
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) error {
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.Delete(b.name, id)
setClientHeader(call.Header())
if b.userProject != "" {

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,13 +15,194 @@
package storage
import (
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
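
For orientation, a minimal usage sketch of these readers with placeholder bucket and object names; a negative length in NewRangeReader, as its comment below notes, reads to the end of the object:

package example

import (
	"context"
	"io/ioutil"

	"cloud.google.com/go/storage"
)

// readFirstKB reads the first 1024 bytes of an object.
func readFirstKB(ctx context.Context, client *storage.Client) ([]byte, error) {
	r, err := client.Bucket("my-bucket").Object("my-object").NewRangeReader(ctx, 0, 1024)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}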
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
start := offset + seen
if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var (
size int64 // total size of object, even if a range was requested.
checkCRC bool
crc uint32
)
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
// Check the CRC iff all of the following hold:
// - We asked for content (length != 0).
// - We got all the content (status != PartialContent).
// - The server sent a CRC header.
// - The Go http stack did not uncompress the file.
// - We were not served compressed data that was uncompressed on download.
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !goHTTPUncompressed(res) && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
contentEncoding: res.Header.Get("Content-Encoding"),
cacheControl: res.Header.Get("Cache-Control"),
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
func uncompressedByServer(res *http.Response) bool {
// If the data is stored as gzip but is not encoded as gzip, then it
// was uncompressed by the server.
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
res.Header.Get("Content-Encoding") != "gzip"
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
@ -29,15 +210,15 @@ var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
body io.ReadCloser
remain, size int64
contentType string
contentEncoding string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
checkedCRC bool // did we check the CRC? (For tests.)
body io.ReadCloser
seen, remain, size int64
contentType string
contentEncoding string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
}
// Close closes the Reader. It must be called when done reading.
@ -46,7 +227,7 @@ func (r *Reader) Close() error {
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.body.Read(p)
n, err := r.readWithRetry(p)
if r.remain != -1 {
r.remain -= int64(n)
}
@ -55,8 +236,7 @@ func (r *Reader) Read(p []byte) (int, error) {
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if r.remain == 0 { // Only check if we have Content-Length.
r.checkedCRC = true
if err == io.EOF {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
@ -66,6 +246,35 @@ func (r *Reader) Read(p []byte) (int, error) {
return n, err
}
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if !shouldRetryRead(err) {
return n, err
}
// Read failed, but we will try again. Send a ranged read request that takes
// into account the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
return n, err
}
r.body.Close()
r.body = res.Body
}
return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
@ -37,6 +36,7 @@ import (
"time"
"unicode/utf8"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
@ -368,7 +368,10 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
@ -383,7 +386,6 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -398,7 +400,10 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
// Update updates an object with the provided attributes.
// All zero-value attributes are ignored.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) {
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
@ -466,7 +471,6 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -532,144 +536,6 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
return &o2
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if length < 0 && offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
}
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
var size int64 // total size of object, even if a range was requested.
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var (
checkCRC bool
crc uint32
)
// Even if there is a CRC header, we can't compute the hash on partial data.
if remain == size {
crc, checkCRC = parseCRC32c(res)
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
contentEncoding: res.Header.Get("Content-Encoding"),
cacheControl: res.Header.Get("Cache-Control"),
wantCRC: crc,
checkCRC: checkCRC,
}, nil
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
@ -856,6 +722,14 @@ type ObjectAttrs struct {
// encryption in Google Cloud Storage.
CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
@ -913,6 +787,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),

vendor/cloud.google.com/go/storage/storage.replay (generated vendored new file, 46908 lines): file diff suppressed because one or more lines are too long

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -48,8 +48,11 @@ type Writer struct {
// to the nearest multiple of 256K. If zero, chunking will be disabled and
// the object will be uploaded in a single request.
//
// ChunkSize will default to a reasonable value. Any custom configuration
// must be done before the first Write call.
// ChunkSize will default to a reasonable value. If you perform many concurrent
// writes of small objects, you may wish set ChunkSize to a value that matches
// your objects' sizes to avoid consuming large amounts of memory.
//
// ChunkSize must be set before the first Write call.
ChunkSize int
// ProgressFunc can be used to monitor the progress of a large write.
@ -85,6 +88,9 @@ func (w *Writer) open() error {
if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
}
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
@ -116,6 +122,9 @@ func (w *Writer) open() error {
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err

View File

@ -79,7 +79,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
@ -371,3 +371,72 @@ func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Resp
result.Response = autorest.Response{Response: resp}
return
}
// Update update an availability set.
//
// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set.
// parameters is parameters supplied to the Update Availability Set operation.
func (client AvailabilitySetsClient) Update(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate) (result AvailabilitySet, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, availabilitySetName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Update", resp, "Failure responding to request")
}
return
}
// UpdatePreparer prepares the Update request.
func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate) (*http.Request, error) {
pathParameters := map[string]interface{}{
"availabilitySetName": autorest.Encode("path", availabilitySetName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client AvailabilitySetsClient) UpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client AvailabilitySetsClient) UpdateResponder(resp *http.Response) (result AvailabilitySet, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
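
A hedged usage sketch of the new synchronous Update; the import path, constructor, and the shape of AvailabilitySetUpdate are assumptions inferred from the 2017-12-01 API version shown in this diff, and the resource names are placeholders:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// updateAvailabilitySet patches an existing availability set synchronously.
func updateAvailabilitySet(ctx context.Context, authorizer autorest.Authorizer, subscriptionID string) (compute.AvailabilitySet, error) {
	client := compute.NewAvailabilitySetsClient(subscriptionID) // assumed constructor
	client.Authorizer = authorizer

	var params compute.AvailabilitySetUpdate
	// Populate params (for example its tags) as allowed by the 2017-12-01 API;
	// the exact field layout of AvailabilitySetUpdate is an assumption here.
	return client.Update(ctx, "my-resource-group", "my-avset", params)
}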

View File

@ -107,7 +107,7 @@ func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),

View File

@ -97,7 +97,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
@ -314,7 +314,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters),
@ -640,7 +640,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),

View File

@ -83,7 +83,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters),
@ -441,3 +441,74 @@ func (client ImagesClient) ListByResourceGroupComplete(ctx context.Context, reso
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
// Update update an image.
//
// resourceGroupName is the name of the resource group. imageName is the name of the image. parameters is
// parameters supplied to the Update Image operation.
func (client ImagesClient) Update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate) (result ImagesUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, imageName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Update", result.Response(), "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate) (*http.Request, error) {
pathParameters := map[string]interface{}{
"imageName": autorest.Encode("path", imageName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client ImagesClient) UpdateResponder(resp *http.Response) (result Image, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
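
Note (not part of the diff): unlike the AvailabilitySets Update above, ImagesClient.Update is a long-running operation — UpdateSender returns an ImagesUpdateFuture rather than an *http.Response. A minimal caller sketch, assuming the future exposes the WaitForCompletion and Result helpers typical of this go-autorest generation (neither appears in the hunk above):

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// updateImageAndWait starts the asynchronous image update, blocks until the
// service reports completion, then fetches the final Image. WaitForCompletion
// and Result are assumed to be generated for ImagesUpdateFuture in this SDK
// version; only Update/UpdateSender/UpdateResponder are shown in the diff.
func updateImageAndWait(ctx context.Context, client compute.ImagesClient,
	resourceGroup, imageName string, update compute.ImageUpdate) (compute.Image, error) {

	future, err := client.Update(ctx, resourceGroup, imageName, update)
	if err != nil {
		return compute.Image{}, err
	}
	// client.Client is the embedded autorest.Client (see UpdateSender above).
	if err := future.WaitForCompletion(ctx, client.Client); err != nil {
		return compute.Image{}, err
	}
	return future.Result(client)
}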

View File

@ -80,7 +80,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval", pathParameters),
@ -157,7 +157,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests", pathParameters),

Some files were not shown because too many files have changed in this diff.