parent 3ec1e79b6a
commit 4e7ce6f42b

@@ -20,6 +20,7 @@
 package metadata // import "cloud.google.com/go/compute/metadata"
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -31,9 +32,6 @@ import (
 	"strings"
 	"sync"
 	"time"
-
-	"golang.org/x/net/context"
-	"golang.org/x/net/context/ctxhttp"
 )
 
 const (
@@ -139,11 +137,11 @@ func testOnGCE() bool {
 	resc := make(chan bool, 2)
 
 	// Try two strategies in parallel.
-	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+	// See https://github.com/googleapis/google-cloud-go/issues/194
 	go func() {
 		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
 		req.Header.Set("User-Agent", userAgent)
-		res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
+		res, err := defaultClient.hc.Do(req.WithContext(ctx))
 		if err != nil {
 			resc <- false
 			return
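The hunk above replaces ctxhttp.Do with the standard-library pattern of binding the context to the request itself. A minimal, self-contained sketch of that pattern follows; the URL and client are placeholders, not this package's metadataIP or defaultClient:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Build the request, then attach the context; the transport cancels the
	// request when ctx is done, which is what ctxhttp.Do used to arrange.
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	res, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}
```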
@@ -302,8 +300,8 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
 		// being stable anyway.
 		host = metadataIP
 	}
-	url := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", url, nil)
+	u := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", u, nil)
 	req.Header.Set("Metadata-Flavor", "Google")
 	req.Header.Set("User-Agent", userAgent)
 	res, err := c.hc.Do(req)
@@ -314,13 +312,13 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
 	if res.StatusCode == http.StatusNotFound {
 		return "", "", NotDefinedError(suffix)
 	}
-	if res.StatusCode != 200 {
-		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
-	}
 	all, err := ioutil.ReadAll(res.Body)
 	if err != nil {
 		return "", "", err
 	}
+	if res.StatusCode != 200 {
+		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+	}
 	return string(all), res.Header.Get("Etag"), nil
 }
 
@@ -501,3 +499,15 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
 		}
 	}
 }
+
+// Error contains an error response from the server.
+type Error struct {
+	// Code is the HTTP response status code.
+	Code int
+	// Message is the server response message.
+	Message string
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
+}
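With the status check moved after ReadAll, non-200 metadata responses now surface as the new *Error type, which keeps the response body. A hedged sketch of how a caller might tell it apart from NotDefinedError; ProjectID is just one of the package's lookup helpers:

```go
package main

import (
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func describe(err error) {
	switch e := err.(type) {
	case nil:
		fmt.Println("success")
	case metadata.NotDefinedError:
		// 404: the metadata key simply does not exist.
		fmt.Println("not defined:", string(e))
	case *metadata.Error:
		// Any other non-200 status, with the server's message preserved.
		fmt.Printf("metadata server error %d: %s\n", e.Code, e.Message)
	default:
		fmt.Println("transport error:", err)
	}
}

func main() {
	_, err := metadata.ProjectID()
	describe(err)
}
```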
@@ -22,13 +22,15 @@
 package iam
 
 import (
+	"context"
+	"fmt"
 	"time"
 
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
+	gax "github.com/googleapis/gax-go/v2"
 	pb "google.golang.org/genproto/googleapis/iam/v1"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
 )
 
 // client abstracts the IAMPolicy API to allow multiple implementations.
@@ -56,6 +58,9 @@ var withRetry = gax.WithRetry(func() gax.Retryer {
 
 func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
 	var proto *pb.Policy
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+	ctx = insertMetadata(ctx, md)
+
 	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
 		var err error
 		proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
@@ -68,6 +73,9 @@ func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, erro
 }
 
 func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+	ctx = insertMetadata(ctx, md)
+
 	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
 		_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
 			Resource: resource,
@@ -79,6 +87,9 @@ func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) err
 
 func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
 	var res *pb.TestIamPermissionsResponse
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+	ctx = insertMetadata(ctx, md)
+
 	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
 		var err error
 		res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
@@ -290,3 +301,15 @@ func memberIndex(m string, b *pb.Binding) int {
 	}
 	return -1
 }
+
+// insertMetadata inserts metadata into the given context
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
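insertMetadata merges the per-call x-goog-request-params header into whatever outgoing gRPC metadata the context already carries. A standalone sketch of the same merge; the resource name is invented for illustration:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// merge appends the given metadata to any metadata already attached to ctx,
// mirroring what insertMetadata does in the hunk above.
func merge(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "x-goog-api-client", "gl-go/1.x")
	ctx = merge(ctx, metadata.Pairs("x-goog-request-params", fmt.Sprintf("resource=%v", "projects/p/rings/r")))

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // both headers are now present on the outgoing call
}
```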
@@ -1,58 +0,0 @@
-// Copyright 2016 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package atomiccache provides a map-based cache that supports very fast
-// reads.
-package atomiccache
-
-import (
-	"sync"
-	"sync/atomic"
-)
-
-type mapType map[interface{}]interface{}
-
-// Cache is a map-based cache that supports fast reads via use of atomics.
-// Writes are slow, requiring a copy of the entire cache.
-// The zero Cache is an empty cache, ready for use.
-type Cache struct {
-	val atomic.Value // mapType
-	mu sync.Mutex // used only by writers
-}
-
-// Get returns the value of the cache at key. If there is no value,
-// getter is called to provide one, and the cache is updated.
-// The getter function may be called concurrently. It should be pure,
-// returning the same value for every call.
-func (c *Cache) Get(key interface{}, getter func() interface{}) interface{} {
-	mp, _ := c.val.Load().(mapType)
-	if v, ok := mp[key]; ok {
-		return v
-	}
-
-	// Compute value without lock.
-	// Might duplicate effort but won't hold other computations back.
-	newV := getter()
-
-	c.mu.Lock()
-	mp, _ = c.val.Load().(mapType)
-	newM := make(mapType, len(mp)+1)
-	for k, v := range mp {
-		newM[k] = v
-	}
-	newM[key] = newV
-	c.val.Store(newM)
-	c.mu.Unlock()
-	return newV
-}
@@ -69,8 +69,7 @@ import (
 	"reflect"
 	"sort"
 	"strings"
-
-	"cloud.google.com/go/internal/atomiccache"
+	"sync"
 )
 
 // A Field records information about a struct field.
@@ -85,10 +84,17 @@ type Field struct {
 	equalFold func(s, t []byte) bool
 }
 
+// ParseTagFunc is a function that accepts a struct tag and returns four values: an alternative name for the field
+// extracted from the tag, a boolean saying whether to keep the field or ignore it, additional data that is stored
+// with the field information to avoid having to parse the tag again, and an error.
 type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interface{}, err error)
 
+// ValidateFunc is a function that accepts a reflect.Type and returns an error if the struct type is invalid in any
+// way.
 type ValidateFunc func(reflect.Type) error
 
+// LeafTypesFunc is a function that accepts a reflect.Type and returns true if the struct type a leaf, or false if not.
+// TODO(deklerk) is this description accurate?
 type LeafTypesFunc func(reflect.Type) bool
 
 // A Cache records information about the fields of struct types.
@@ -98,7 +104,7 @@ type Cache struct {
 	parseTag ParseTagFunc
 	validate ValidateFunc
 	leafTypes LeafTypesFunc
-	cache atomiccache.Cache // from reflect.Type to cacheValue
+	cache sync.Map // from reflect.Type to cacheValue
 }
 
 // NewCache constructs a Cache.
@@ -205,13 +211,19 @@ type cacheValue struct {
 // This code has been copied and modified from
 // https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/encode.go.
 func (c *Cache) cachedTypeFields(t reflect.Type) (List, error) {
-	cv := c.cache.Get(t, func() interface{} {
+	var cv cacheValue
+	x, ok := c.cache.Load(t)
+	if ok {
+		cv = x.(cacheValue)
+	} else {
 		if err := c.validate(t); err != nil {
-			return cacheValue{nil, err}
-		}
+			cv = cacheValue{nil, err}
+		} else {
 			f, err := c.typeFields(t)
-			return cacheValue{List(f), err}
-	}).(cacheValue)
+			cv = cacheValue{List(f), err}
+		}
+		c.cache.Store(t, cv)
+	}
 	return cv.fields, cv.err
 }
 
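The fields cache now uses sync.Map instead of the deleted atomiccache package; as before, two goroutines may duplicate the computation on a miss, but reads never take a lock. A minimal sketch of the same load-then-compute-then-store shape, with the cached value standing in for the real typeFields work:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

// cache memoizes an expensive per-type computation, tolerating duplicate work
// on a miss, just like cachedTypeFields above.
type cache struct {
	m sync.Map // reflect.Type -> string
}

func (c *cache) get(t reflect.Type) string {
	if v, ok := c.m.Load(t); ok {
		return v.(string)
	}
	v := "fields of " + t.String() // stand-in for the real computation
	c.m.Store(t, v)
	return v
}

func main() {
	var c cache
	type point struct{ X, Y int }
	fmt.Println(c.get(reflect.TypeOf(point{})))
	fmt.Println(c.get(reflect.TypeOf(point{}))) // second call hits the cache
}
```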
@@ -15,11 +15,10 @@
 package internal
 
 import (
+	"context"
 	"time"
 
-	gax "github.com/googleapis/gax-go"
-
-	"golang.org/x/net/context"
+	gax "github.com/googleapis/gax-go/v2"
 )
 
 // Retry calls the supplied function f repeatedly according to the provided
@@ -0,0 +1,61 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+	"math"
+	"math/big"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/google/go-cmp/cmp"
+)
+
+var (
+	alwaysEqual = cmp.Comparer(func(_, _ interface{}) bool { return true })
+
+	defaultCmpOptions = []cmp.Option{
+		// Use proto.Equal for protobufs
+		cmp.Comparer(proto.Equal),
+		// Use big.Rat.Cmp for big.Rats
+		cmp.Comparer(func(x, y *big.Rat) bool {
+			if x == nil || y == nil {
+				return x == y
+			}
+			return x.Cmp(y) == 0
+		}),
+		// NaNs compare equal
+		cmp.FilterValues(func(x, y float64) bool {
+			return math.IsNaN(x) && math.IsNaN(y)
+		}, alwaysEqual),
+		cmp.FilterValues(func(x, y float32) bool {
+			return math.IsNaN(float64(x)) && math.IsNaN(float64(y))
+		}, alwaysEqual),
+	}
+)
+
+// Equal tests two values for equality.
+func Equal(x, y interface{}, opts ...cmp.Option) bool {
+	// Put default options at the end. Order doesn't matter.
+	opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...)
+	return cmp.Equal(x, y, opts...)
+}
+
+// Diff reports the differences between two values.
+// Diff(x, y) == "" iff Equal(x, y).
+func Diff(x, y interface{}, opts ...cmp.Option) string {
+	// Put default options at the end. Order doesn't matter.
+	opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...)
+	return cmp.Diff(x, y, opts...)
+}
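Equal and Diff append the default comparers after any caller-supplied options. The sketch below exercises two of the same go-cmp options directly, outside the new package, to show why they matter for NaNs and for big.Rat values (which cmp cannot compare without a Comparer):

```go
package main

import (
	"fmt"
	"math"
	"math/big"

	"github.com/google/go-cmp/cmp"
)

func main() {
	alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true })
	opts := []cmp.Option{
		// Treat two NaNs as equal, as the new defaultCmpOptions do.
		cmp.FilterValues(func(x, y float64) bool {
			return math.IsNaN(x) && math.IsNaN(y)
		}, alwaysEqual),
		// Compare big.Rats by value rather than by unexported fields.
		cmp.Comparer(func(x, y *big.Rat) bool {
			if x == nil || y == nil {
				return x == y
			}
			return x.Cmp(y) == 0
		}),
	}

	fmt.Println(cmp.Equal(math.NaN(), math.NaN(), opts...))             // true
	fmt.Println(cmp.Equal(big.NewRat(1, 2), big.NewRat(2, 4), opts...)) // true
}
```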
@@ -0,0 +1,152 @@
+// Copyright 2014 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
+package testutil
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"golang.org/x/oauth2/jwt"
+)
+
+const (
+	envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
+	envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
+)
+
+// ProjID returns the project ID to use in integration tests, or the empty
+// string if none is configured.
+func ProjID() string {
+	return os.Getenv(envProjID)
+}
+
+// Credentials returns the credentials to use in integration tests, or nil if
+// none is configured. It uses the standard environment variable for tests in
+// this repo.
+func Credentials(ctx context.Context, scopes ...string) *google.Credentials {
+	return CredentialsEnv(ctx, envPrivateKey, scopes...)
+}
+
+// CredentialsEnv returns the credentials to use in integration tests, or nil
+// if none is configured. If the environment variable is unset, CredentialsEnv
+// will try to find 'Application Default Credentials'. Else, CredentialsEnv
+// will return nil. CredentialsEnv will log.Fatal if the token source is
+// specified but missing or invalid.
+func CredentialsEnv(ctx context.Context, envVar string, scopes ...string) *google.Credentials {
+	key := os.Getenv(envVar)
+	if key == "" { // Try for application default credentials.
+		creds, err := google.FindDefaultCredentials(ctx, scopes...)
+		if err != nil {
+			log.Println("No 'Application Default Credentials' found.")
+			return nil
+		}
+		return creds
+	}
+
+	data, err := ioutil.ReadFile(key)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	creds, err := google.CredentialsFromJSON(ctx, data, scopes...)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return creds
+}
+
+// TokenSource returns the OAuth2 token source to use in integration tests,
+// or nil if none is configured. It uses the standard environment variable
+// for tests in this repo.
+func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource {
+	return TokenSourceEnv(ctx, envPrivateKey, scopes...)
+}
+
+// TokenSourceEnv returns the OAuth2 token source to use in integration tests. or nil
+// if none is configured. It tries to get credentials from the filename in the
+// environment variable envVar. If the environment variable is unset, TokenSourceEnv
+// will try to find 'Application Default Credentials'. Else, TokenSourceEnv will
+// return nil. TokenSourceEnv will log.Fatal if the token source is specified but
+// missing or invalid.
+func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource {
+	key := os.Getenv(envVar)
+	if key == "" { // Try for application default credentials.
+		ts, err := google.DefaultTokenSource(ctx, scopes...)
+		if err != nil {
+			log.Println("No 'Application Default Credentials' found.")
+			return nil
+		}
+		return ts
+	}
+	conf, err := jwtConfigFromFile(key, scopes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return conf.TokenSource(ctx)
+}
+
+// JWTConfig reads the JSON private key file whose name is in the default
+// environment variable, and returns the jwt.Config it contains. It ignores
+// scopes.
+// If the environment variable is empty, it returns (nil, nil).
+func JWTConfig() (*jwt.Config, error) {
+	return jwtConfigFromFile(os.Getenv(envPrivateKey), nil)
+}
+
+// jwtConfigFromFile reads the given JSON private key file, and returns the
+// jwt.Config it contains.
+// If the filename is empty, it returns (nil, nil).
+func jwtConfigFromFile(filename string, scopes []string) (*jwt.Config, error) {
+	if filename == "" {
+		return nil, nil
+	}
+	jsonKey, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read the JSON key file, err: %v", err)
+	}
+	conf, err := google.JWTConfigFromJSON(jsonKey, scopes...)
+	if err != nil {
+		return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
+	}
+	return conf, nil
+}
+
+// CanReplay reports whether an integration test can be run in replay mode.
+// The replay file must exist, and the GCLOUD_TESTS_GOLANG_ENABLE_REPLAY
+// environment variable must be non-empty.
+func CanReplay(replayFilename string) bool {
+	if os.Getenv("GCLOUD_TESTS_GOLANG_ENABLE_REPLAY") == "" {
+		return false
+	}
+	_, err := os.Stat(replayFilename)
+	return err == nil
+}
+
+// ErroringTokenSource is a token source for testing purposes,
+// to always return a non-nil error to its caller. It is useful
+// when testing error responses with bad oauth2 credentials.
+type ErroringTokenSource struct{}
+
+// Token implements oauth2.TokenSource, returning a nil oauth2.Token and a non-nil error.
+func (fts ErroringTokenSource) Token() (*oauth2.Token, error) {
+	return nil, errors.New("intentional error")
+}
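An integration test inside this repo would typically consult these helpers and skip when nothing is configured. A hedged sketch; the scope and the client wiring are illustrative, not part of the diff:

```go
package mytest

import (
	"context"
	"testing"

	"cloud.google.com/go/internal/testutil"
	"google.golang.org/api/option"
)

func TestIntegration_Something(t *testing.T) {
	ctx := context.Background()
	projID := testutil.ProjID()
	ts := testutil.TokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if projID == "" || ts == nil {
		// Neither the GCLOUD_TESTS_GOLANG_* variables nor ADC are configured.
		t.Skip("integration test credentials not set; skipping")
	}
	_ = option.WithTokenSource(ts) // pass to the client under test, e.g. NewClient(ctx, opts...)
}
```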
@@ -12,20 +12,33 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build go1.8
+package testutil
 
-package http
-
 import (
-	"net/http"
-
-	"go.opencensus.io/plugin/ochttp"
-	"google.golang.org/api/transport/http/internal/propagation"
+	"math/rand"
+	"sync"
+	"time"
 )
 
-func addOCTransport(trans http.RoundTripper) http.RoundTripper {
-	return &ochttp.Transport{
-		Base: trans,
-		Propagation: &propagation.HTTPFormat{},
+// NewRand creates a new *rand.Rand seeded with t. The return value is safe for use
+// with multiple goroutines.
+func NewRand(t time.Time) *rand.Rand {
+	s := &lockedSource{src: rand.NewSource(t.UnixNano())}
+	return rand.New(s)
 }
+
+// lockedSource makes a rand.Source safe for use by multiple goroutines.
+type lockedSource struct {
+	mu sync.Mutex
+	src rand.Source
+}
+
+func (ls *lockedSource) Int63() int64 {
+	ls.mu.Lock()
+	defer ls.mu.Unlock()
+	return ls.src.Int63()
+}
+
+func (ls *lockedSource) Seed(int64) {
+	panic("shouldn't be calling Seed")
+}
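NewRand guards the rand.Source with a mutex so a single *rand.Rand may be shared across goroutines, as its doc comment promises. A small illustrative test of that claim:

```go
package mytest

import (
	"sync"
	"testing"
	"time"

	"cloud.google.com/go/internal/testutil"
)

func TestConcurrentRand(t *testing.T) {
	r := testutil.NewRand(time.Now())
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = r.Intn(100) // safe: the underlying source is mutex-guarded
		}()
	}
	wg.Wait()
}
```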
@@ -0,0 +1,135 @@
+/*
+Copyright 2016 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testutil
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"regexp"
+	"strconv"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// A Server is an in-process gRPC server, listening on a system-chosen port on
+// the local loopback interface. Servers are for testing only and are not
+// intended to be used in production code.
+//
+// To create a server, make a new Server, register your handlers, then call
+// Start:
+//
+//	srv, err := NewServer()
+//	...
+//	mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler)
+//	....
+//	srv.Start()
+//
+// Clients should connect to the server with no security:
+//
+//	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
+//	...
+type Server struct {
+	Addr string
+	Port int
+	l net.Listener
+	Gsrv *grpc.Server
+}
+
+// NewServer creates a new Server. The Server will be listening for gRPC connections
+// at the address named by the Addr field, without TLS.
+func NewServer(opts ...grpc.ServerOption) (*Server, error) {
+	return NewServerWithPort(0, opts...)
+}
+
+// NewServerWithPort creates a new Server at a specific port. The Server will be listening
+// for gRPC connections at the address named by the Addr field, without TLS.
+func NewServerWithPort(port int, opts ...grpc.ServerOption) (*Server, error) {
+	l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
+	if err != nil {
+		return nil, err
+	}
+	s := &Server{
+		Addr: l.Addr().String(),
+		Port: parsePort(l.Addr().String()),
+		l: l,
+		Gsrv: grpc.NewServer(opts...),
+	}
+	return s, nil
+}
+
+// Start causes the server to start accepting incoming connections.
+// Call Start after registering handlers.
+func (s *Server) Start() {
+	go func() {
+		if err := s.Gsrv.Serve(s.l); err != nil {
+			log.Printf("testutil.Server.Start: %v", err)
+		}
+	}()
+}
+
+// Close shuts down the server.
+func (s *Server) Close() {
+	s.Gsrv.Stop()
+	s.l.Close()
+}
+
+// PageBounds converts an incoming page size and token from an RPC request into
+// slice bounds and the outgoing next-page token.
+//
+// PageBounds assumes that the complete, unpaginated list of items exists as a
+// single slice. In addition to the page size and token, PageBounds needs the
+// length of that slice.
+//
+// PageBounds's first two return values should be used to construct a sub-slice of
+// the complete, unpaginated slice. E.g. if the complete slice is s, then
+// s[from:to] is the desired page. Its third return value should be set as the
+// NextPageToken field of the RPC response.
+func PageBounds(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) {
+	from, to = 0, length
+	if pageToken != "" {
+		from, err = strconv.Atoi(pageToken)
+		if err != nil {
+			return 0, 0, "", status.Errorf(codes.InvalidArgument, "bad page token: %v", err)
+		}
+		if from >= length {
+			return length, length, "", nil
+		}
+	}
+	if pageSize > 0 && from+pageSize < length {
+		to = from + pageSize
+		nextPageToken = strconv.Itoa(to)
+	}
+	return from, to, nextPageToken, nil
+}
+
+var portParser = regexp.MustCompile(`:[0-9]+`)
+
+func parsePort(addr string) int {
+	res := portParser.FindAllString(addr, -1)
+	if len(res) == 0 {
+		panic(fmt.Errorf("parsePort: found no numbers in %s", addr))
+	}
+	stringPort := res[0][1:] // strip the :
+	p, err := strconv.ParseInt(stringPort, 10, 32)
+	if err != nil {
+		panic(err)
+	}
+	return int(p)
+}
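PageBounds treats a page token as an integer offset into the complete slice. The sketch below copies that logic into a trimmed standalone helper and walks a list page by page, which is how a fake server handler would drive it:

```go
package main

import (
	"fmt"
	"strconv"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// pageBounds mirrors testutil.PageBounds: tokens are integer offsets
// into the complete, unpaginated slice.
func pageBounds(pageSize int, pageToken string, length int) (from, to int, next string, err error) {
	from, to = 0, length
	if pageToken != "" {
		if from, err = strconv.Atoi(pageToken); err != nil {
			return 0, 0, "", status.Errorf(codes.InvalidArgument, "bad page token: %v", err)
		}
		if from >= length {
			return length, length, "", nil
		}
	}
	if pageSize > 0 && from+pageSize < length {
		to = from + pageSize
		next = strconv.Itoa(to)
	}
	return from, to, next, nil
}

func main() {
	items := []string{"a", "b", "c", "d", "e"}
	token := ""
	for {
		from, to, next, _ := pageBounds(2, token, len(items))
		fmt.Println(items[from:to]) // [a b], then [c d], then [e]
		if next == "" {
			break
		}
		token = next // the client echoes NextPageToken back on the next request
	}
}
```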
@@ -0,0 +1,68 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+	"log"
+	"time"
+
+	"go.opencensus.io/plugin/ocgrpc"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/trace"
+)
+
+// TestExporter is a test utility exporter. It should be created with NewtestExporter.
+type TestExporter struct {
+	Spans []*trace.SpanData
+	Stats chan *view.Data
+}
+
+// NewTestExporter creates a TestExporter and registers it with OpenCensus.
+func NewTestExporter() *TestExporter {
+	te := &TestExporter{Stats: make(chan *view.Data)}
+
+	view.RegisterExporter(te)
+	view.SetReportingPeriod(time.Millisecond)
+	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
+		log.Fatal(err)
+	}
+
+	trace.RegisterExporter(te)
+	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+
+	return te
+}
+
+// ExportSpan exports a span.
+func (te *TestExporter) ExportSpan(s *trace.SpanData) {
+	te.Spans = append(te.Spans, s)
+}
+
+// ExportView exports a view.
+func (te *TestExporter) ExportView(vd *view.Data) {
+	if len(vd.Rows) > 0 {
+		select {
+		case te.Stats <- vd:
+		default:
+		}
+	}
+}
+
+// Unregister unregisters the exporter from OpenCensus.
+func (te *TestExporter) Unregister() {
+	view.UnregisterExporter(te)
+	trace.UnregisterExporter(te)
+	view.SetReportingPeriod(0) // reset to default value
+}
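A test would register the exporter, run some traced work, and then look at te.Spans (or receive from te.Stats). A hedged sketch, with a hand-opened span standing in for real client activity:

```go
package mytest

import (
	"context"
	"testing"

	"cloud.google.com/go/internal/testutil"
	"go.opencensus.io/trace"
)

func TestSpansAreExported(t *testing.T) {
	te := testutil.NewTestExporter()
	defer te.Unregister()

	// Any traced work; here we just open and close a span directly.
	_, span := trace.StartSpan(context.Background(), "example/Operation")
	span.End()

	if len(te.Spans) == 0 {
		t.Error("expected at least one exported span")
	}
}
```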
@@ -12,23 +12,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build go1.8
-
 package trace
 
 import (
+	"context"
+	"fmt"
+
 	"go.opencensus.io/trace"
-	"golang.org/x/net/context"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/genproto/googleapis/rpc/code"
 	"google.golang.org/grpc/status"
 )
 
+// StartSpan adds a span to the trace with the given name.
 func StartSpan(ctx context.Context, name string) context.Context {
 	ctx, _ = trace.StartSpan(ctx, name)
 	return ctx
 }
 
+// EndSpan ends a span with the given error.
 func EndSpan(ctx context.Context, err error) {
 	span := trace.FromContext(ctx)
 	if err != nil {
@@ -37,7 +39,7 @@ func EndSpan(ctx context.Context, err error) {
 	span.End()
 }
 
-// ToStatus interrogates an error and converts it to an appropriate
+// toStatus interrogates an error and converts it to an appropriate
 // OpenCensus status.
 func toStatus(err error) trace.Status {
 	if err2, ok := err.(*googleapi.Error); ok {
@@ -81,3 +83,27 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 {
 	return int32(code.Code_UNKNOWN)
 	}
 }
+
+// TODO: (odeke-em): perhaps just pass around spans due to the cost
+// incurred from using trace.FromContext(ctx) yet we could avoid
+// throwing away the work done by ctx, span := trace.StartSpan.
+func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
+	var attrs []trace.Attribute
+	for k, v := range attrMap {
+		var a trace.Attribute
+		switch v := v.(type) {
+		case string:
+			a = trace.StringAttribute(k, v)
+		case bool:
+			a = trace.BoolAttribute(k, v)
+		case int:
+			a = trace.Int64Attribute(k, int64(v))
+		case int64:
+			a = trace.Int64Attribute(k, v)
+		default:
+			a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
+		}
+		attrs = append(attrs, a)
+	}
+	trace.FromContext(ctx).Annotatef(attrs, format, args...)
+}
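TracePrintf converts the attribute map to typed OpenCensus attributes and annotates the span already carried by ctx. The sketch below uses a trimmed copy of the helper so it is self-contained; the span name and attributes are illustrative:

```go
package main

import (
	"context"

	octrace "go.opencensus.io/trace"
)

// tracePrintf is a trimmed copy of the helper added above, shown here only to
// make the sketch self-contained.
func tracePrintf(ctx context.Context, attrs map[string]interface{}, format string, args ...interface{}) {
	var as []octrace.Attribute
	for k, v := range attrs {
		switch v := v.(type) {
		case string:
			as = append(as, octrace.StringAttribute(k, v))
		case int:
			as = append(as, octrace.Int64Attribute(k, int64(v)))
		case bool:
			as = append(as, octrace.BoolAttribute(k, v))
		}
	}
	octrace.FromContext(ctx).Annotatef(as, format, args...)
}

func main() {
	ctx, span := octrace.StartSpan(context.Background(), "example/Read",
		octrace.WithSampler(octrace.AlwaysSample()))
	defer span.End()

	// The annotation lands on the span carried by ctx.
	tracePrintf(ctx, map[string]interface{}{"rows": 42, "cached": true}, "read %d rows", 42)
}
```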
@@ -67,5 +67,5 @@ func goVer(s string) string {
 }
 
 func notSemverRune(r rune) bool {
-	return strings.IndexRune("0123456789.", r) < 0
+	return !strings.ContainsRune("0123456789.", r)
 }
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 // Package kms is an auto-generated package for the
 // Cloud Key Management Service (KMS) API.
@@ -20,10 +20,25 @@
 //
 // Manages keys and performs cryptographic operations in a central cloud
 // service, for direct use by other cloud resources and applications.
+//
+// Use of Context
+//
+// The ctx passed to NewClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// For information about setting deadlines, reusing contexts, and more
+// please visit godoc.org/cloud.google.com/go.
 package kms // import "cloud.google.com/go/kms/apiv1"
 
 import (
-	"golang.org/x/net/context"
+	"context"
+	"runtime"
+	"strings"
+	"unicode"
+
 	"google.golang.org/grpc/metadata"
 )
 
@@ -44,3 +59,42 @@ func DefaultAuthScopes() []string {
 		"https://www.googleapis.com/auth/cloud-platform",
 	}
 }
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+	const develPrefix = "devel +"
+
+	s := runtime.Version()
+	if strings.HasPrefix(s, develPrefix) {
+		s = s[len(develPrefix):]
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+			s = s[:p]
+		}
+		return s
+	}
+
+	notSemverRune := func(r rune) bool {
+		return strings.IndexRune("0123456789.", r) < 0
+	}
+
+	if strings.HasPrefix(s, "go1") {
+		s = s[2:]
+		var prerelease string
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+			s, prerelease = s[:p], s[p:]
+		}
+		if strings.HasSuffix(s, ".") {
+			s += "0"
+		} else if strings.Count(s, ".") < 2 {
+			s += ".0"
+		}
+		if prerelease != "" {
+			s += "-" + prerelease
+		}
+		return s
+	}
+	return "UNKNOWN"
+}
+
+const versionClient = "20190404"
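The new "Use of Context" section above describes the call pattern the generated client expects: one ctx for construction and authentication, a ctx per call, and Close to release the connection. A sketch of that pattern; the key ring name is a placeholder:

```go
package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
)

func main() {
	ctx := context.Background()
	// ctx here is used for authentication and connection setup...
	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // ...and Close releases the underlying connection.

	// ...while each call uses the ctx passed to it.
	_, err = client.GetKeyRing(ctx, &kmspb.GetKeyRingRequest{
		Name: "projects/my-project/locations/global/keyRings/my-ring", // placeholder
	})
	if err != nil {
		log.Println("GetKeyRing:", err)
	}
}
```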
@@ -0,0 +1,40 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kms
+
+import (
+	"cloud.google.com/go/iam"
+	kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1"
+)
+
+// KeyRingIAM returns a handle to inspect and change permissions of a KeyRing.
+//
+// Deprecated: Please use ResourceIAM and provide the KeyRing.Name as input.
+func (c *KeyManagementClient) KeyRingIAM(keyRing *kmspb.KeyRing) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), keyRing.Name)
+}
+
+// CryptoKeyIAM returns a handle to inspect and change permissions of a CryptoKey.
+//
+// Deprecated: Please use ResourceIAM and provide the CryptoKey.Name as input.
+func (c *KeyManagementClient) CryptoKeyIAM(cryptoKey *kmspb.CryptoKey) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), cryptoKey.Name)
+}
+
+// ResourceIAM returns a handle to inspect and change permissions of the resource
+// indicated by the given resource path.
+func (c *KeyManagementClient) ResourceIAM(resourcePath string) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), resourcePath)
+}
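ResourceIAM generalizes the two deprecated helpers to any KMS resource path. A hedged sketch of reading the IAM policy on a key ring; the resource name is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
)

func main() {
	ctx := context.Background()
	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Works for key rings, crypto keys, or any other KMS resource path.
	handle := client.ResourceIAM("projects/my-project/locations/global/keyRings/my-ring")
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("roles:", policy.Roles())
}
```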
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC
+// Copyright 2019 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,19 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// AUTO-GENERATED CODE. DO NOT EDIT.
+// Code generated by gapic-generator. DO NOT EDIT.
 
 package kms
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"time"
 
-	"cloud.google.com/go/internal/version"
 	"github.com/golang/protobuf/proto"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
+	gax "github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/api/transport"
@@ -164,8 +163,8 @@ func (c *KeyManagementClient) Close() error {
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
 func (c *KeyManagementClient) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
 
@@ -300,8 +299,9 @@ func (c *KeyManagementClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyR
 	return resp, nil
 }
 
-// GetCryptoKey returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as well as its
-// [primary][google.cloud.kms.v1.CryptoKey.primary] [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
+// GetCryptoKey returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as
+// well as its [primary][google.cloud.kms.v1.CryptoKey.primary]
+// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
 func (c *KeyManagementClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -318,7 +318,8 @@ func (c *KeyManagementClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCr
 	return resp, nil
 }
 
-// GetCryptoKeyVersion returns metadata for a given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
+// GetCryptoKeyVersion returns metadata for a given
+// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
 func (c *KeyManagementClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -335,7 +336,8 @@ func (c *KeyManagementClient) GetCryptoKeyVersion(ctx context.Context, req *kmsp
 	return resp, nil
 }
 
-// CreateKeyRing create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and Location.
+// CreateKeyRing create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and
+// Location.
 func (c *KeyManagementClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -352,7 +354,8 @@ func (c *KeyManagementClient) CreateKeyRing(ctx context.Context, req *kmspb.Crea
 	return resp, nil
 }
 
-// CreateCryptoKey create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a [KeyRing][google.cloud.kms.v1.KeyRing].
+// CreateCryptoKey create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a
+// [KeyRing][google.cloud.kms.v1.KeyRing].
 //
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and
 // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm]
@@ -373,7 +376,8 @@ func (c *KeyManagementClient) CreateCryptoKey(ctx context.Context, req *kmspb.Cr
 	return resp, nil
 }
 
-// CreateCryptoKeyVersion create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a [CryptoKey][google.cloud.kms.v1.CryptoKey].
+// CreateCryptoKeyVersion create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a
+// [CryptoKey][google.cloud.kms.v1.CryptoKey].
 //
 // The server will assign the next sequential id. If unset,
 // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to
@@ -411,13 +415,18 @@ func (c *KeyManagementClient) UpdateCryptoKey(ctx context.Context, req *kmspb.Up
 	return resp, nil
 }
 
-// UpdateCryptoKeyVersion update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s metadata.
+// UpdateCryptoKeyVersion update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s
+// metadata.
 //
 // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between
-// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] and
-// [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] using this
-// method. See [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] and [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] to
-// move between other states.
+// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]
+// and
+// [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED]
+// using this method. See
+// [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]
+// and
+// [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]
+// to move between other states.
 func (c *KeyManagementClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key_version.name", req.GetCryptoKeyVersion().GetName()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -434,8 +443,9 @@ func (c *KeyManagementClient) UpdateCryptoKeyVersion(ctx context.Context, req *k
 	return resp, nil
 }
 
-// Encrypt encrypts data, so that it can only be recovered by a call to [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt].
-// The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be
+// Encrypt encrypts data, so that it can only be recovered by a call to
+// [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The
+// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be
 // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
 func (c *KeyManagementClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
@@ -453,8 +463,10 @@ func (c *KeyManagementClient) Encrypt(ctx context.Context, req *kmspb.EncryptReq
 	return resp, nil
 }
 
-// Decrypt decrypts data that was protected by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
-// must be [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
+// Decrypt decrypts data that was protected by
+// [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The
+// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be
+// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
 func (c *KeyManagementClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -471,7 +483,9 @@ func (c *KeyManagementClient) Decrypt(ctx context.Context, req *kmspb.DecryptReq
 	return resp, nil
 }
 
-// UpdateCryptoKeyPrimaryVersion update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that will be used in [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt].
+// UpdateCryptoKeyPrimaryVersion update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that
+// will be used in
+// [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt].
 //
 // Returns an error if called on an asymmetric key.
 func (c *KeyManagementClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) {
@@ -490,18 +504,24 @@ func (c *KeyManagementClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context,
 	return resp, nil
 }
 
-// DestroyCryptoKeyVersion schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for destruction.
+// DestroyCryptoKeyVersion schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for
+// destruction.
 //
-// Upon calling this method, [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to
+// Upon calling this method,
+// [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will
+// be set to
 // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]
-// and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be set to a time 24
-// hours in the future, at which point the [state][google.cloud.kms.v1.CryptoKeyVersion.state]
-// will be changed to
-// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], and the key
-// material will be irrevocably destroyed.
+// and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will
+// be set to a time 24 hours in the future, at which point the
+// [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be changed to
+// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED],
+// and the key material will be irrevocably destroyed.
 //
-// Before the [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is reached,
-// [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] may be called to reverse the process.
+// Before the
+// [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is
+// reached,
+// [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]
+// may be called to reverse the process.
 func (c *KeyManagementClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -522,9 +542,11 @@ func (c *KeyManagementClient) DestroyCryptoKeyVersion(ctx context.Context, req *
 // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]
 // state.
 //
-// Upon restoration of the CryptoKeyVersion, [state][google.cloud.kms.v1.CryptoKeyVersion.state]
-// will be set to [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED],
-// and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will be cleared.
+// Upon restoration of the CryptoKeyVersion,
+// [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to
+// [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED],
+// and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will
+// be cleared.
 func (c *KeyManagementClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
@@ -541,9 +563,11 @@ func (c *KeyManagementClient) RestoreCryptoKeyVersion(ctx context.Context, req *
 	return resp, nil
 }
 
-// GetPublicKey returns the public key for the given [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The
+// GetPublicKey returns the public key for the given
+// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The
 // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be
-// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] or
+// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]
+// or
 // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT].
|
||||||
func (c *KeyManagementClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) {
|
func (c *KeyManagementClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) {
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
|
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
|
||||||
|
@ -562,8 +586,10 @@ func (c *KeyManagementClient) GetPublicKey(ctx context.Context, req *kmspb.GetPu
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsymmetricDecrypt decrypts data that was encrypted with a public key retrieved from
|
// AsymmetricDecrypt decrypts data that was encrypted with a public key retrieved from
|
||||||
// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
|
// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
|
||||||
// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] ASYMMETRIC_DECRYPT.
|
// corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
|
||||||
|
// with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
|
||||||
|
// ASYMMETRIC_DECRYPT.
|
||||||
func (c *KeyManagementClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) {
|
func (c *KeyManagementClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) {
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
|
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
|
||||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||||
|
@ -580,9 +606,11 @@ func (c *KeyManagementClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsymmetricSign signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
|
// AsymmetricSign signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
|
||||||
|
// with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
|
||||||
// ASYMMETRIC_SIGN, producing a signature that can be verified with the public
|
// ASYMMETRIC_SIGN, producing a signature that can be verified with the public
|
||||||
// key retrieved from [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey].
|
// key retrieved from
|
||||||
|
// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey].
|
||||||
func (c *KeyManagementClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) {
|
func (c *KeyManagementClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) {
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
|
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
|
||||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||||
|
|
|
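Usage sketch for the destroy/restore methods whose comments are rewrapped above (not part of the diff; it assumes the public cloud.google.com/go/kms/apiv1 client imported as kms and its kmspb protos from google.golang.org/genproto/googleapis/cloud/kms/v1; the resource name is a placeholder):

	ctx := context.Background()
	c, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	name := "projects/P/locations/L/keyRings/R/cryptoKeys/K/cryptoKeyVersions/1" // placeholder

	// Schedules destruction: state becomes DESTROY_SCHEDULED and destroy_time is ~24h out.
	if _, err := c.DestroyCryptoKeyVersion(ctx, &kmspb.DestroyCryptoKeyVersionRequest{Name: name}); err != nil {
		log.Fatal(err)
	}
	// Before destroy_time is reached, the operation can be reversed.
	if _, err := c.RestoreCryptoKeyVersion(ctx, &kmspb.RestoreCryptoKeyVersionRequest{Name: name}); err != nil {
		log.Fatal(err)
	}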
@@ -18,12 +18,12 @@ package spanner
 
 import (
 	"bytes"
+	"context"
 	"encoding/gob"
 	"log"
 	"time"
 
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 )
 
@@ -147,31 +147,33 @@ func (t *BatchReadOnlyTransaction) PartitionQuery(ctx context.Context, statement
 		return nil, err
 	}
 	sid, client := sh.getID(), sh.getClient()
-	var (
-		resp       *sppb.PartitionResponse
-		partitions []*Partition
-	)
+	params, paramTypes, err := statement.convertParams()
+	if err != nil {
+		return nil, err
+	}
+
 	// request Partitions
 	req := &sppb.PartitionQueryRequest{
 		Session:          sid,
 		Transaction:      ts,
 		Sql:              statement.SQL,
 		PartitionOptions: opt.toProto(),
+		Params:           params,
+		ParamTypes:       paramTypes,
 	}
-	if err := statement.bindParams(req); err != nil {
-		return nil, err
-	}
-	resp, err = client.PartitionQuery(ctx, req)
+	resp, err := client.PartitionQuery(ctx, req)
+
 	// prepare ExecuteSqlRequest
 	r := &sppb.ExecuteSqlRequest{
 		Session:     sid,
 		Transaction: ts,
 		Sql:         statement.SQL,
+		Params:      params,
+		ParamTypes:  paramTypes,
 	}
-	if err := statement.bindParams(r); err != nil {
-		return nil, err
-	}
+
 	// generate Partitions
+	var partitions []*Partition
 	for _, p := range resp.GetPartitions() {
 		partitions = append(partitions, &Partition{
 			pt: p.PartitionToken,
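A sketch of how the public API drives the PartitionQuery path changed above (assumes an existing *spanner.Client named client and a ctx; table and SQL are placeholders):

	txn, err := client.BatchReadOnlyTransaction(ctx, spanner.StrongRead())
	if err != nil {
		log.Fatal(err)
	}
	defer txn.Close()

	stmt := spanner.Statement{
		SQL:    "SELECT SingerId, Name FROM Singers WHERE SingerId > @min",
		Params: map[string]interface{}{"min": int64(10)},
	}
	partitions, err := txn.PartitionQuery(ctx, stmt, spanner.PartitionOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range partitions {
		iter := txn.Execute(ctx, p) // each partition can be processed independently
		if err := iter.Do(func(r *spanner.Row) error { return nil }); err != nil {
			log.Fatal(err)
		}
	}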
@@ -17,13 +17,14 @@ limitations under the License.
 package spanner
 
 import (
+	"context"
 	"fmt"
 	"regexp"
 	"sync/atomic"
 	"time"
 
+	"cloud.google.com/go/internal/trace"
 	"cloud.google.com/go/internal/version"
-	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	gtransport "google.golang.org/api/transport/grpc"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
@@ -87,7 +88,6 @@ type ClientConfig struct {
 	// NumChannels is the number of gRPC channels.
 	// If zero, a reasonable default is used based on the execution environment.
 	NumChannels int
-	co          []option.ClientOption
 	// SessionPoolConfig is the configuration for session pool.
 	SessionPoolConfig
 	// SessionLabels for the sessions created by this client.
@@ -120,8 +120,8 @@ func NewClient(ctx context.Context, database string, opts ...option.ClientOption
 // NewClientWithConfig creates a client to a database. A valid database name has the
 // form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
 func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (c *Client, err error) {
-	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.NewClient")
-	defer func() { traceEndSpan(ctx, err) }()
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.NewClient")
+	defer func() { trace.EndSpan(ctx, err) }()
 
 	// Validate database path.
 	if err := validDatabaseName(database); err != nil {
@@ -161,6 +161,8 @@ func NewClientWithConfig(ctx context.Context, database string, config ClientConf
 	if config.MaxBurst == 0 {
 		config.MaxBurst = 10
 	}
+	// TODO(deklerk) This should be replaced with a balancer with config.NumChannels
+	// connections, instead of config.NumChannels clientconns.
 	for i := 0; i < config.NumChannels; i++ {
 		conn, err := gtransport.Dial(ctx, allOpts...)
 		if err != nil {
@@ -340,18 +342,21 @@ func checkNestedTxn(ctx context.Context) error {
 // The function f will be called one or more times. It must not maintain
 // any state between calls.
 //
-// If the transaction cannot be committed or if f returns an IsAborted error,
+// If the transaction cannot be committed or if f returns an ABORTED error,
 // ReadWriteTransaction will call f again. It will continue to call f until the
 // transaction can be committed or the Context times out or is cancelled. If f
-// returns an error other than IsAborted, ReadWriteTransaction will abort the
+// returns an error other than ABORTED, ReadWriteTransaction will abort the
 // transaction and return the error.
 //
 // To limit the number of retries, set a deadline on the Context rather than
 // using a fixed limit on the number of attempts. ReadWriteTransaction will
 // retry as needed until that deadline is met.
+//
+// See https://godoc.org/cloud.google.com/go/spanner#ReadWriteTransaction for
+// more details.
 func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (commitTimestamp time.Time, err error) {
-	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.ReadWriteTransaction")
-	defer func() { traceEndSpan(ctx, err) }()
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.ReadWriteTransaction")
+	defer func() { trace.EndSpan(ctx, err) }()
 	if err := checkNestedTxn(ctx); err != nil {
 		return time.Time{}, err
 	}
@@ -381,7 +386,7 @@ func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Contex
 		}
 	}
 	t.txReadOnly.txReadEnv = t
-	tracePrintf(ctx, map[string]interface{}{"transactionID": string(sh.getTransactionID())},
+	trace.TracePrintf(ctx, map[string]interface{}{"transactionID": string(sh.getTransactionID())},
 		"Starting transaction attempt")
 	if err = t.begin(ctx); err != nil {
 		// Mask error from begin operation as retryable error.
@@ -434,8 +439,8 @@ func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption)
 		})
 	}
 
-	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Apply")
-	defer func() { traceEndSpan(ctx, err) }()
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Apply")
+	defer func() { trace.EndSpan(ctx, err) }()
 	t := &writeOnlyTransaction{c.idleSessions}
 	return t.applyAtLeastOnce(ctx, ms...)
 }
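Usage sketch for the retry semantics described in the ReadWriteTransaction doc comment above (assumes an existing *spanner.Client named client; table, key and columns are placeholders):

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute) // the deadline bounds the retries
	defer cancel()

	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
		row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"Balance"})
		if err != nil {
			return err // an ABORTED error makes the closure run again; other errors abort the transaction
		}
		var balance int64
		if err := row.Column(0, &balance); err != nil {
			return err
		}
		return txn.BufferWrite([]*spanner.Mutation{
			spanner.Update("Accounts", []string{"Name", "Balance"}, []interface{}{"alice", balance + 10}),
		})
	})
	if err != nil {
		log.Fatal(err)
	}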
23  vendor/cloud.google.com/go/spanner/util.go → vendor/cloud.google.com/go/spanner/cmp.go  (generated, vendored)
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 Google LLC
+Copyright 2018 Google LLC
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,18 +16,13 @@ limitations under the License.
 
 package spanner
 
-// maxUint64 returns the maximum of two uint64
-func maxUint64(a, b uint64) uint64 {
-	if a > b {
-		return a
-	}
-	return b
-}
+import (
+	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
+)
 
-// minUint64 returns the minimum of two uint64
-func minUint64(a, b uint64) uint64 {
-	if a > b {
-		return b
-	}
-	return a
+func testEqual(a, b interface{}) bool {
+	return testutil.Equal(a, b,
+		cmp.AllowUnexported(TimestampBound{}, Error{}, Mutation{}, Row{},
+			Partition{}, BatchReadOnlyTransactionID{}))
 }
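For reference, a minimal sketch of the go-cmp pattern that testEqual builds on (it assumes only github.com/google/go-cmp/cmp; the point type is illustrative):

	type point struct{ x, y int }

	equal := cmp.Equal(point{1, 2}, point{1, 2}, cmp.AllowUnexported(point{}))
	// Without AllowUnexported (or an Equal method on the type), cmp panics on unexported fields.
	_ = equal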
|
@ -19,8 +19,6 @@ Package spanner provides a client for reading and writing to Cloud Spanner
|
||||||
databases. See the packages under admin for clients that operate on databases
|
databases. See the packages under admin for clients that operate on databases
|
||||||
and instances.
|
and instances.
|
||||||
|
|
||||||
Note: This package is in beta. Some backwards-incompatible changes may occur.
|
|
||||||
|
|
||||||
See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction
|
See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction
|
||||||
to Cloud Spanner and additional help on using this API.
|
to Cloud Spanner and additional help on using this API.
|
||||||
|
|
||||||
|
|
|
@@ -17,9 +17,9 @@ limitations under the License.
 package spanner
 
 import (
+	"context"
 	"fmt"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package spanner
+package backoff
 
 import (
 	"math/rand"
@@ -23,7 +23,7 @@ import (
 
 const (
 	// minBackoff is the minimum backoff used by default.
-	minBackoff = 1 * time.Second
+	minBackoff = 20 * time.Millisecond
 	// maxBackoff is the maximum backoff used by default.
 	maxBackoff = 32 * time.Second
 	// jitter is the jitter factor.
@@ -32,16 +32,16 @@ const (
 	rate = 1.3
 )
 
-var defaultBackoff = exponentialBackoff{minBackoff, maxBackoff}
+var DefaultBackoff = ExponentialBackoff{minBackoff, maxBackoff}
 
-type exponentialBackoff struct {
-	min, max time.Duration
+type ExponentialBackoff struct {
+	Min, Max time.Duration
 }
 
 // delay calculates the delay that should happen at n-th
 // exponential backoff in a series.
-func (b exponentialBackoff) delay(retries int) time.Duration {
-	min, max := float64(b.min), float64(b.max)
+func (b ExponentialBackoff) Delay(retries int) time.Duration {
+	min, max := float64(b.Min), float64(b.Max)
 	delay := min
 	for delay < max && retries > 0 {
 		delay *= rate
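A self-contained sketch of the schedule Delay implements — min * rate^retries, capped at max — with jitter applied around the result. The jitter constant exists in this file but its exact application is not shown in the hunk, so that part is an assumption:

	package main

	import (
		"fmt"
		"math/rand"
		"time"
	)

	func delay(retries int, min, max time.Duration, rate, jitter float64) time.Duration {
		d := float64(min)
		for d < float64(max) && retries > 0 {
			d *= rate
			retries--
		}
		if d > float64(max) {
			d = float64(max)
		}
		// Assumed jitter: spread the delay uniformly within ±jitter of the computed value.
		d *= 1 - jitter + 2*jitter*rand.Float64()
		return time.Duration(d)
	}

	func main() {
		for i := 0; i < 5; i++ {
			fmt.Println(delay(i, 20*time.Millisecond, 32*time.Second, 1.3, 0.4))
		}
	}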
@@ -21,11 +21,10 @@ import (
 	"fmt"
 	"time"
 
-	"google.golang.org/grpc/codes"
-
 	"cloud.google.com/go/civil"
 	proto3 "github.com/golang/protobuf/ptypes/struct"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
 )
 
 // A Key can be either a Cloud Spanner row's primary key or a secondary index key.
@@ -20,7 +20,6 @@ import (
 	"reflect"
 
 	proto3 "github.com/golang/protobuf/ptypes/struct"
-
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 	"google.golang.org/grpc/codes"
 )
@@ -1,74 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.7
-
-package spanner
-
-import (
-	"reflect"
-	"strconv"
-)
-
-func structTagLookup(tag reflect.StructTag, key string) (string, bool) {
-	// from go1.10.2 implementation of StructTag.Lookup.
-	for tag != "" {
-		// Skip leading space.
-		i := 0
-		for i < len(tag) && tag[i] == ' ' {
-			i++
-		}
-		tag = tag[i:]
-		if tag == "" {
-			break
-		}
-
-		// Scan to colon. A space, a quote or a control character is a syntax error.
-		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
-		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
-		// as it is simpler to inspect the tag's bytes than the tag's runes.
-		i = 0
-		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
-			i++
-		}
-		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
-			break
-		}
-		name := string(tag[:i])
-		tag = tag[i+1:]
-
-		// Scan quoted string to find value.
-		i = 1
-		for i < len(tag) && tag[i] != '"' {
-			if tag[i] == '\\' {
-				i++
-			}
-			i++
-		}
-		if i >= len(tag) {
-			break
-		}
-		qvalue := string(tag[:i+1])
-		tag = tag[i+1:]
-
-		if key == name {
-			value, err := strconv.Unquote(qvalue)
-			if err != nil {
-				break
-			}
-			return value, true
-		}
-	}
-	return "", false
-}
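The deleted file above polyfilled reflect.StructTag.Lookup for pre-1.7 toolchains; with the build floor raised, the standard-library method can be used directly. A minimal illustration:

	tag := reflect.StructTag(`spanner:"user_id"`)
	if v, ok := tag.Lookup("spanner"); ok {
		fmt.Println(v) // user_id
	}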
@@ -1,39 +0,0 @@
-// Copyright 2017 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.8
-
-package spanner
-
-import "golang.org/x/net/context"
-
-// OpenCensus only supports go 1.8 and higher.
-
-func traceStartSpan(ctx context.Context, _ string) context.Context {
-	return ctx
-}
-
-func traceEndSpan(context.Context, error) {
-}
-
-func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) {
-}
-
-type dummy struct{}
-
-// Not supported below Go 1.8.
-var OpenSessionCount dummy
-
-func recordStat(context.Context, dummy, int64) {
-}
@@ -15,9 +15,10 @@
 package spanner
 
 import (
+	"context"
 	"time"
 
-	"golang.org/x/net/context"
+	"cloud.google.com/go/internal/trace"
 	"google.golang.org/api/iterator"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 	"google.golang.org/grpc/codes"
@@ -32,8 +33,8 @@ import (
 // PartitionedUpdate returns an estimated count of the number of rows affected. The actual
 // number of affected rows may be greater than the estimate.
 func (c *Client) PartitionedUpdate(ctx context.Context, statement Statement) (count int64, err error) {
-	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.PartitionedUpdate")
-	defer func() { traceEndSpan(ctx, err) }()
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.PartitionedUpdate")
+	defer func() { trace.EndSpan(ctx, err) }()
 	if err := checkNestedTxn(ctx); err != nil {
 		return 0, err
 	}
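Usage sketch for the PartitionedUpdate method traced above (assumes an existing *spanner.Client named client and a ctx; the SQL is a placeholder):

	count, err := client.PartitionedUpdate(ctx, spanner.Statement{
		SQL: "UPDATE Accounts SET Active = false WHERE LastLogin < '2017-01-01'",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("about %d rows affected (count is an estimate)", count)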
@@ -18,15 +18,17 @@ package spanner
 
 import (
 	"bytes"
+	"context"
 	"io"
 	"log"
 	"sync/atomic"
 	"time"
 
 	"cloud.google.com/go/internal/protostruct"
+	"cloud.google.com/go/internal/trace"
+	"cloud.google.com/go/spanner/internal/backoff"
 	proto "github.com/golang/protobuf/proto"
 	proto3 "github.com/golang/protobuf/ptypes/struct"
-	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 	"google.golang.org/grpc/codes"
@@ -47,7 +49,7 @@ func errEarlyReadEnd() error {
 // Cloud Spanner.
 func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), setTimestamp func(time.Time), release func(error)) *RowIterator {
 	ctx, cancel := context.WithCancel(ctx)
-	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.RowIterator")
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.RowIterator")
 	return &RowIterator{
 		streamd: newResumableStreamDecoder(ctx, rpc),
 		rowd:    &partialResultSetDecoder{},
@@ -167,7 +169,7 @@ func (r *RowIterator) Do(f func(r *Row) error) error {
 // Stop terminates the iteration. It should be called after you finish using the iterator.
 func (r *RowIterator) Stop() {
 	if r.streamd != nil {
-		defer traceEndSpan(r.streamd.ctx, r.err)
+		defer trace.EndSpan(r.streamd.ctx, r.err)
 	}
 	if r.cancel != nil {
 		r.cancel()
@@ -309,7 +311,7 @@ type resumableStreamDecoder struct {
 	// err is the last error resumableStreamDecoder has encountered so far.
 	err error
 	// backoff to compute delays between retries.
-	backoff exponentialBackoff
+	backoff backoff.ExponentialBackoff
 }
 
 // newResumableStreamDecoder creates a new resumeableStreamDecoder instance.
@@ -320,7 +322,7 @@ func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context,
 		ctx: ctx,
 		rpc: rpc,
 		maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens),
-		backoff: defaultBackoff,
+		backoff: backoff.DefaultBackoff,
 	}
 }
 
@@ -343,7 +345,7 @@ func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool {
 	if rt == nil {
 		return false
 	}
-	if bytes.Compare(rt, d.resumeToken) == 0 {
+	if bytes.Equal(rt, d.resumeToken) {
 		return false
 	}
 	return true
@@ -530,8 +532,8 @@ func (d *resumableStreamDecoder) resetBackOff() {
 
 // doBackoff does an exponential backoff sleep.
 func (d *resumableStreamDecoder) doBackOff() {
-	delay := d.backoff.delay(d.retryCount)
-	tracePrintf(d.ctx, nil, "Backing off stream read for %s", delay)
+	delay := d.backoff.Delay(d.retryCount)
+	trace.TracePrintf(d.ctx, nil, "Backing off stream read for %s", delay)
 	ticker := time.NewTicker(delay)
 	defer ticker.Stop()
 	d.retryCount++
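The isNewResumeToken change above swaps bytes.Compare(...) == 0 for bytes.Equal, which is equivalent and states the intent directly:

	a, b := []byte("resume-token"), []byte("resume-token")
	_ = bytes.Compare(a, b) == 0 // old spelling
	_ = bytes.Equal(a, b)        // same result, clearer, no ordering comparison needed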
@@ -17,13 +17,15 @@ limitations under the License.
 package spanner
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
 
+	"cloud.google.com/go/internal/trace"
+	"cloud.google.com/go/spanner/internal/backoff"
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
-	"golang.org/x/net/context"
 	edpb "google.golang.org/genproto/googleapis/rpc/errdetails"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
@@ -155,7 +157,7 @@ func extractRetryDelay(err error) (time.Duration, bool) {
 // runRetryable keeps attempting to run f until one of the following happens:
 // 1) f returns nil error or an unretryable error;
 // 2) context is cancelled or timeout.
-// TODO: consider using https://github.com/googleapis/gax-go once it
+// TODO: consider using https://github.com/googleapis/gax-go/v2 once it
 // becomes available internally.
 func runRetryable(ctx context.Context, f func(context.Context) error) error {
 	return toSpannerError(runRetryableNoWrap(ctx, f))
@@ -182,9 +184,9 @@ func runRetryableNoWrap(ctx context.Context, f func(context.Context) error) erro
 		// Error is retryable, do exponential backoff and continue.
 		b, ok := extractRetryDelay(funcErr)
 		if !ok {
-			b = defaultBackoff.delay(retryCount)
+			b = backoff.DefaultBackoff.Delay(retryCount)
 		}
-		tracePrintf(ctx, nil, "Backing off for %s, then retrying", b)
+		trace.TracePrintf(ctx, nil, "Backing off for %s, then retrying", b)
 		select {
 		case <-ctx.Done():
 			return errContextCanceled(ctx, funcErr)
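A sketch of the retry shape runRetryable follows after this change: run f, and on a retryable error sleep for either the server-suggested delay or the default exponential backoff, giving up when the context ends. isRetryable and serverRetryDelay are hypothetical stand-ins for the package's unexported helpers; backoff refers to the internal package introduced above:

	func retry(ctx context.Context, f func(context.Context) error) error {
		for attempt := 0; ; attempt++ {
			err := f(ctx)
			if err == nil || !isRetryable(err) { // hypothetical predicate
				return err
			}
			d, ok := serverRetryDelay(err) // hypothetical: reads RetryInfo from the error details
			if !ok {
				d = backoff.DefaultBackoff.Delay(attempt)
			}
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(d):
			}
		}
	}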
@@ -21,7 +21,6 @@ import (
 	"reflect"
 
 	proto3 "github.com/golang/protobuf/ptypes/struct"
-
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 	"google.golang.org/grpc/codes"
 )
@@ -19,6 +19,7 @@ package spanner
 import (
 	"container/heap"
 	"container/list"
+	"context"
 	"fmt"
 	"log"
 	"math/rand"
@@ -26,8 +27,7 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
-
+	"cloud.google.com/go/internal/trace"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
@@ -258,7 +258,7 @@ func (s *session) destroy(isExpire bool) bool {
 	// Unregister s from healthcheck queue.
 	s.pool.hc.unregister(s)
 	// Remove s from Cloud Spanner service.
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
 	defer cancel()
 	s.delete(ctx)
 	return true
@@ -293,13 +293,15 @@ func (s *session) prepareForWrite(ctx context.Context) error {
 type SessionPoolConfig struct {
 	// getRPCClient is the caller supplied method for getting a gRPC client to Cloud Spanner, this makes session pool able to use client pooling.
 	getRPCClient func() (sppb.SpannerClient, error)
-	// MaxOpened is the maximum number of opened sessions allowed by the
-	// session pool. Defaults to NumChannels * 100.
+	// MaxOpened is the maximum number of opened sessions allowed by the session
+	// pool. Defaults to NumChannels * 100. If the client tries to open a session and
+	// there are already MaxOpened sessions, it will block until one becomes
+	// available or the context passed to the client method is canceled or times out.
 	MaxOpened uint64
 	// MinOpened is the minimum number of opened sessions that the session pool
 	// tries to maintain. Session pool won't continue to expire sessions if number
-	// of opened connections drops below MinOpened. However, if session is found
-	// to be broken, it will still be evicted from session pool, therefore it is
+	// of opened connections drops below MinOpened. However, if a session is found
+	// to be broken, it will still be evicted from the session pool, therefore it is
 	// posssible that the number of opened sessions drops below MinOpened.
 	MinOpened uint64
 	// MaxIdle is the maximum number of idle sessions, pool is allowed to keep. Defaults to 0.
@@ -449,7 +451,7 @@ func (p *sessionPool) shouldPrepareWrite() bool {
 }
 
 func (p *sessionPool) createSession(ctx context.Context) (*session, error) {
-	tracePrintf(ctx, nil, "Creating a new session")
+	trace.TracePrintf(ctx, nil, "Creating a new session")
 	doneCreate := func(done bool) {
 		p.mu.Lock()
 		if !done {
@@ -516,7 +518,7 @@ func (p *sessionPool) isHealthy(s *session) bool {
 // take returns a cached session if there are available ones; if there isn't any, it tries to allocate a new one.
 // Session returned by take should be used for read operations.
 func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
-	tracePrintf(ctx, nil, "Acquiring a read-only session")
+	trace.TracePrintf(ctx, nil, "Acquiring a read-only session")
 	ctx = contextWithOutgoingMetadata(ctx, p.md)
 	for {
 		var (
@@ -532,11 +534,11 @@ func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
 		if p.idleList.Len() > 0 {
 			// Idle sessions are available, get one from the top of the idle list.
 			s = p.idleList.Remove(p.idleList.Front()).(*session)
-			tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+			trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
 				"Acquired read-only session")
 		} else if p.idleWriteList.Len() > 0 {
 			s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session)
-			tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+			trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
 				"Acquired read-write session")
 		}
 		if s != nil {
@@ -554,10 +556,10 @@ func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
 		if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) {
 			mayGetSession := p.mayGetSession
 			p.mu.Unlock()
-			tracePrintf(ctx, nil, "Waiting for read-only session to become available")
+			trace.TracePrintf(ctx, nil, "Waiting for read-only session to become available")
 			select {
 			case <-ctx.Done():
-				tracePrintf(ctx, nil, "Context done waiting for session")
+				trace.TracePrintf(ctx, nil, "Context done waiting for session")
 				return nil, errGetSessionTimeout()
 			case <-mayGetSession:
 			}
@@ -569,10 +571,10 @@ func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
 		p.createReqs++
 		p.mu.Unlock()
 		if s, err = p.createSession(ctx); err != nil {
-			tracePrintf(ctx, nil, "Error creating session: %v", err)
+			trace.TracePrintf(ctx, nil, "Error creating session: %v", err)
 			return nil, toSpannerError(err)
 		}
-		tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+		trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
 			"Created session")
 		return &sessionHandle{session: s}, nil
 	}
@@ -581,7 +583,7 @@ func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
 // takeWriteSession returns a write prepared cached session if there are available ones; if there isn't any, it tries to allocate a new one.
 // Session returned should be used for read write transactions.
 func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, error) {
-	tracePrintf(ctx, nil, "Acquiring a read-write session")
+	trace.TracePrintf(ctx, nil, "Acquiring a read-write session")
 	ctx = contextWithOutgoingMetadata(ctx, p.md)
 	for {
 		var (
@@ -597,10 +599,10 @@ func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, err
 		if p.idleWriteList.Len() > 0 {
 			// Idle sessions are available, get one from the top of the idle list.
 			s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session)
-			tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, "Acquired read-write session")
+			trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, "Acquired read-write session")
 		} else if p.idleList.Len() > 0 {
 			s = p.idleList.Remove(p.idleList.Front()).(*session)
-			tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, "Acquired read-only session")
+			trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, "Acquired read-only session")
 		}
 		if s != nil {
 			s.setIdleList(nil)
@@ -616,10 +618,10 @@ func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, err
 		if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) {
 			mayGetSession := p.mayGetSession
 			p.mu.Unlock()
-			tracePrintf(ctx, nil, "Waiting for read-write session to become available")
+			trace.TracePrintf(ctx, nil, "Waiting for read-write session to become available")
 			select {
 			case <-ctx.Done():
-				tracePrintf(ctx, nil, "Context done waiting for session")
+				trace.TracePrintf(ctx, nil, "Context done waiting for session")
 				return nil, errGetSessionTimeout()
 			case <-mayGetSession:
 			}
@@ -632,16 +634,16 @@ func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, err
 			p.createReqs++
 			p.mu.Unlock()
 			if s, err = p.createSession(ctx); err != nil {
-				tracePrintf(ctx, nil, "Error creating session: %v", err)
+				trace.TracePrintf(ctx, nil, "Error creating session: %v", err)
 				return nil, toSpannerError(err)
 			}
-			tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+			trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
 				"Created session")
 		}
 		if !s.isWritePrepared() {
 			if err = s.prepareForWrite(ctx); err != nil {
 				s.recycle()
-				tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+				trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
 					"Error preparing session for write")
 				return nil, toSpannerError(err)
 			}
@@ -759,6 +761,7 @@ type healthChecker struct {
 	done chan struct{}
 	// once is used for closing channel done only once.
 	once sync.Once
+	maintainerCancel func()
 }
 
 // newHealthChecker initializes new instance of healthChecker.
@@ -773,6 +776,7 @@ func newHealthChecker(interval time.Duration, workers int, sampleInterval time.D
 		sampleInterval: sampleInterval,
 		ready:          make(chan struct{}),
 		done:           make(chan struct{}),
+		maintainerCancel: func() {},
 	}
 	hc.waitWorkers.Add(1)
 	go hc.maintainer()
@@ -785,6 +789,9 @@ func newHealthChecker(interval time.Duration, workers int, sampleInterval time.D
 
 // close closes the healthChecker and waits for all healthcheck workers to exit.
 func (hc *healthChecker) close() {
+	hc.mu.Lock()
+	hc.maintainerCancel()
+	hc.mu.Unlock()
 	hc.once.Do(func() { close(hc.done) })
 	hc.waitWorkers.Wait()
 }
@@ -940,9 +947,7 @@ func (hc *healthChecker) worker(i int) {
 		}
 		select {
 		case <-time.After(time.Duration(rand.Int63n(pause) + pause/2)):
-			break
 		case <-hc.done:
-			break
 		}
 
 	}
@@ -961,18 +966,56 @@ func (hc *healthChecker) maintainer() {
 	var (
 		windowSize uint64 = 10
 		iteration  uint64
-		timeout    <-chan time.Time
 	)
 
-	// replenishPool is run if numOpened is less than sessionsToKeep, timeouts on sampleInterval.
-	replenishPool := func(sessionsToKeep uint64) {
-		ctx, _ := context.WithTimeout(context.Background(), hc.sampleInterval)
 	for {
-		select {
-		case <-timeout:
+		if hc.isClosing() {
+			hc.waitWorkers.Done()
+			return
+		}
+
+		// maxSessionsInUse is the maximum number of sessions in use concurrently over a period of time.
+		var maxSessionsInUse uint64
+
+		// Updates metrics.
+		hc.pool.mu.Lock()
+		currSessionsInUse := hc.pool.numOpened - uint64(hc.pool.idleList.Len()) - uint64(hc.pool.idleWriteList.Len())
+		currSessionsOpened := hc.pool.numOpened
+		hc.pool.mu.Unlock()
+
+		hc.mu.Lock()
+		if iteration%windowSize == 0 || maxSessionsInUse < currSessionsInUse {
+			maxSessionsInUse = currSessionsInUse
+		}
+		sessionsToKeep := maxUint64(hc.pool.MinOpened,
+			minUint64(currSessionsOpened, hc.pool.MaxIdle+maxSessionsInUse))
+		ctx, cancel := context.WithTimeout(context.Background(), hc.sampleInterval)
+		hc.maintainerCancel = cancel
+		hc.mu.Unlock()
+
+		// Replenish or Shrink pool if needed.
+		// Note: we don't need to worry about pending create session requests, we only need to sample the current sessions in use.
+		// the routines will not try to create extra / delete creating sessions.
+		if sessionsToKeep > currSessionsOpened {
+			hc.replenishPool(ctx, sessionsToKeep)
+		} else {
+			hc.shrinkPool(ctx, sessionsToKeep)
+		}
+
+		select {
+		case <-ctx.Done():
+		case <-hc.done:
+			cancel()
+		}
+		iteration++
+	}
+}
+
+// replenishPool is run if numOpened is less than sessionsToKeep, timeouts on sampleInterval.
+func (hc *healthChecker) replenishPool(ctx context.Context, sessionsToKeep uint64) {
+	for {
+		if ctx.Err() != nil {
 			return
-		default:
-			break
 		}
 
 		p := hc.pool
@@ -1007,13 +1050,10 @@ func (hc *healthChecker) maintainer() {
 	}
 
 	// shrinkPool, scales down the session pool.
-	shrinkPool := func(sessionsToKeep uint64) {
+func (hc *healthChecker) shrinkPool(ctx context.Context, sessionsToKeep uint64) {
 	for {
-		select {
-		case <-timeout:
+		if ctx.Err() != nil {
 			return
-		default:
-			break
 		}
 
 		p := hc.pool
@@ -1040,49 +1080,6 @@ func (hc *healthChecker) maintainer() {
 		}
 	}
 
-	for {
-		if hc.isClosing() {
-			hc.waitWorkers.Done()
-			return
-		}
-
-		// maxSessionsInUse is the maximum number of sessions in use concurrently over a period of time.
-		var maxSessionsInUse uint64
-
-		// Updates metrics.
-		hc.pool.mu.Lock()
-		currSessionsInUse := hc.pool.numOpened - uint64(hc.pool.idleList.Len()) - uint64(hc.pool.idleWriteList.Len())
-		currSessionsOpened := hc.pool.numOpened
-		hc.pool.mu.Unlock()
-
-		hc.mu.Lock()
-		if iteration%windowSize == 0 || maxSessionsInUse < currSessionsInUse {
-			maxSessionsInUse = currSessionsInUse
-		}
-		sessionsToKeep := maxUint64(hc.pool.MinOpened,
-			minUint64(currSessionsOpened, hc.pool.MaxIdle+maxSessionsInUse))
-		hc.mu.Unlock()
-
-		timeout = time.After(hc.sampleInterval)
-		// Replenish or Shrink pool if needed.
-		// Note: we don't need to worry about pending create session requests, we only need to sample the current sessions in use.
-		// the routines will not try to create extra / delete creating sessions.
-		if sessionsToKeep > currSessionsOpened {
-			replenishPool(sessionsToKeep)
-		} else {
-			shrinkPool(sessionsToKeep)
-		}
-
-		select {
-		case <-timeout:
-			break
-		case <-hc.done:
-			break
-		}
-
-		iteration++
-	}
-}
 
 // shouldDropSession returns true if a particular error leads to the removal of a session
 func shouldDropSession(err error) bool {
 	if err == nil {
@@ -1091,8 +1088,24 @@ func shouldDropSession(err error) bool {
 	// If a Cloud Spanner can no longer locate the session (for example, if session is garbage collected), then caller
 	// should not try to return the session back into the session pool.
 	// TODO: once gRPC can return auxiliary error information, stop parsing the error message.
-	if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") {
+	if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found") {
 		return true
 	}
 	return false
 }
+
+// maxUint64 returns the maximum of two uint64
+func maxUint64(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
	return b
+}
+
+// minUint64 returns the minimum of two uint64
+func minUint64(a, b uint64) uint64 {
+	if a > b {
+		return b
+	}
+	return a
+}
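The exported pool knobs documented above are set through the public client config; a sketch (database name and numbers are placeholders):

	client, err := spanner.NewClientWithConfig(ctx,
		"projects/P/instances/I/databases/D",
		spanner.ClientConfig{
			NumChannels: 4,
			SessionPoolConfig: spanner.SessionPoolConfig{
				MinOpened: 10,  // the pool will not shrink below this
				MaxOpened: 400, // callers block once this many sessions exist
				MaxIdle:   20,
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()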
@@ -21,7 +21,7 @@ import (
 	"fmt"
 
 	proto3 "github.com/golang/protobuf/ptypes/struct"
+	structpb "github.com/golang/protobuf/ptypes/struct"
 	sppb "google.golang.org/genproto/googleapis/spanner/v1"
 	"google.golang.org/grpc/codes"
 )
@@ -48,6 +48,36 @@ func NewStatement(sql string) Statement {
 	return Statement{SQL: sql, Params: map[string]interface{}{}}
 }
 
+var (
+	errNilParam = errors.New("use T(nil), not nil")
+	errNoType   = errors.New("no type information")
+)
+
+// convertParams converts a statement's parameters into proto Param and
+// ParamTypes.
+func (s *Statement) convertParams() (*structpb.Struct, map[string]*sppb.Type, error) {
+	params := &proto3.Struct{
+		Fields: map[string]*proto3.Value{},
+	}
+	paramTypes := map[string]*sppb.Type{}
+	for k, v := range s.Params {
+		if v == nil {
+			return nil, nil, errBindParam(k, v, errNilParam)
+		}
+		val, t, err := encodeValue(v)
+		if err != nil {
+			return nil, nil, errBindParam(k, v, err)
+		}
+		if t == nil { // should not happen, because of nil check above
+			return nil, nil, errBindParam(k, v, errNoType)
+		}
+		params.Fields[k] = val
+		paramTypes[k] = t
+	}
+
+	return params, paramTypes, nil
+}
+
 // errBindParam returns error for not being able to bind parameter to query request.
 func errBindParam(k string, v interface{}, err error) error {
 	if err == nil {
@@ -60,42 +90,3 @@ func errBindParam(k string, v interface{}, err error) error {
 	se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %v)", k, v))
 	return se
 }
-
-var (
-	errNilParam = errors.New("use T(nil), not nil")
-	errNoType   = errors.New("no type information")
-)
-
-// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest or sppb.PartitionQueryRequest.
-func (s *Statement) bindParams(i interface{}) error {
-	params := &proto3.Struct{
-		Fields: map[string]*proto3.Value{},
-	}
-	paramTypes := map[string]*sppb.Type{}
-	for k, v := range s.Params {
-		if v == nil {
-			return errBindParam(k, v, errNilParam)
-		}
-		val, t, err := encodeValue(v)
-		if err != nil {
-			return errBindParam(k, v, err)
-		}
-		if t == nil { // should not happen, because of nil check above
-			return errBindParam(k, v, errNoType)
-		}
-		params.Fields[k] = val
-		paramTypes[k] = t
-	}
-
-	switch r := i.(type) {
-	default:
-		return fmt.Errorf("failed to bind query parameter, unexpected request type: %v", r)
-	case *sppb.ExecuteSqlRequest:
-		r.Params = params
-		r.ParamTypes = paramTypes
-	case *sppb.PartitionQueryRequest:
-		r.Params = params
-		r.ParamTypes = paramTypes
-	}
-	return nil
-}
41 vendor/cloud.google.com/go/spanner/go18.go → vendor/cloud.google.com/go/spanner/stats.go generated vendored
@@ -12,54 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.8

package spanner

import (
	"fmt"
	"context"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
	"golang.org/x/net/context"
)

func traceStartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}

func traceEndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		// TODO(jba): Add error code to the status.
		span.SetStatus(trace.Status{Message: err.Error()})
	}
	span.End()
}

func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
	var attrs []trace.Attribute
	for k, v := range attrMap {
		var a trace.Attribute
		switch v := v.(type) {
		case string:
			a = trace.StringAttribute(k, v)
		case bool:
			a = trace.BoolAttribute(k, v)
		case int:
			a = trace.Int64Attribute(k, int64(v))
		case int64:
			a = trace.Int64Attribute(k, v)
		default:
			a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
		}
		attrs = append(attrs, a)
	}
	trace.FromContext(ctx).Annotatef(attrs, format, args...)
}

const statsPrefix = "cloud.google.com/go/spanner/"

func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) {
@@ -17,12 +17,12 @@ limitations under the License.
package spanner

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/context"
	"cloud.google.com/go/internal/trace"

	"google.golang.org/api/iterator"
	sppb "google.golang.org/genproto/googleapis/spanner/v1"
	"google.golang.org/grpc"
@@ -81,8 +81,8 @@ type ReadOptions struct {
// ReadWithOptions returns a RowIterator for reading multiple rows from the database.
// Pass a ReadOptions to modify the read operation.
func (t *txReadOnly) ReadWithOptions(ctx context.Context, table string, keys KeySet, columns []string, opts *ReadOptions) (ri *RowIterator) {
	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Read")
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Read")
	defer func() { traceEndSpan(ctx, ri.err) }()
	defer func() { trace.EndSpan(ctx, ri.err) }()
	var (
		sh *sessionHandle
		ts *sppb.TransactionSelector
@@ -189,9 +189,9 @@ func (t *txReadOnly) AnalyzeQuery(ctx context.Context, statement Statement) (*sp
}

func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.ExecuteSqlRequest_QueryMode) (ri *RowIterator) {
	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Query")
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Query")
	defer func() { traceEndSpan(ctx, ri.err) }()
	defer func() { trace.EndSpan(ctx, ri.err) }()
	req, sh, err := t.prepareExecuteSql(ctx, statement, mode)
	req, sh, err := t.prepareExecuteSQL(ctx, statement, mode)
	if err != nil {
		return &RowIterator{err: err}
	}
@@ -206,8 +206,7 @@ func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.E
		t.release)
}

func (t *txReadOnly) prepareExecuteSql(ctx context.Context, stmt Statement, mode sppb.ExecuteSqlRequest_QueryMode) (
func (t *txReadOnly) prepareExecuteSQL(ctx context.Context, stmt Statement, mode sppb.ExecuteSqlRequest_QueryMode) (*sppb.ExecuteSqlRequest, *sessionHandle, error) {
	*sppb.ExecuteSqlRequest, *sessionHandle, error) {
	sh, ts, err := t.acquire(ctx)
	if err != nil {
		return nil, nil, err
@@ -218,15 +217,18 @@ func (t *txReadOnly) prepareExecuteSql(ctx context.Context, stmt Statement, mode
		// Might happen if transaction is closed in the middle of a API call.
		return nil, nil, errSessionClosed(sh)
	}
	params, paramTypes, err := stmt.convertParams()
	if err != nil {
		return nil, nil, err
	}
	req := &sppb.ExecuteSqlRequest{
		Session: sid,
		Transaction: ts,
		Sql: stmt.SQL,
		QueryMode: mode,
		Seqno: atomic.AddInt64(&t.sequenceNumber, 1),
	}
		Params: params,
	if err := stmt.bindParams(req); err != nil {
		ParamTypes: paramTypes,
		return nil, nil, err
	}
	return req, sh, nil
}
@@ -250,11 +252,6 @@ func errRtsUnavailable() error {
	return spannerErrorf(codes.Internal, "read timestamp is unavailable")
}

// errTxNotInitialized returns error for using an uninitialized transaction.
func errTxNotInitialized() error {
	return spannerErrorf(codes.InvalidArgument, "cannot use a uninitialized transaction")
}

// errTxClosed returns error for using a closed transaction.
func errTxClosed() error {
	return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction")
@@ -561,9 +558,9 @@ func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTra
// ReadWriteTransaction provides a locking read-write transaction.
//
// This type of transaction is the only way to write data into Cloud Spanner;
// (*Client).Apply and (*Client).ApplyAtLeastOnce use transactions
// (*Client).Apply, (*Client).ApplyAtLeastOnce, (*Client).PartitionedUpdate use
// internally. These transactions rely on pessimistic locking and, if
// transactions internally. These transactions rely on pessimistic locking and,
// necessary, two-phase commit. Locking read-write transactions may abort,
// if necessary, two-phase commit. Locking read-write transactions may abort,
// requiring the application to retry. However, the interface exposed by
// (*Client).ReadWriteTransaction eliminates the need for applications to write
// retry loops explicitly.
@@ -663,9 +660,9 @@ func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error {
// Update returns an error if the statement is a query. However, the
// query is executed, and any data read will be validated upon commit.
func (t *ReadWriteTransaction) Update(ctx context.Context, stmt Statement) (rowCount int64, err error) {
	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Update")
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Update")
	defer func() { traceEndSpan(ctx, err) }()
	defer func() { trace.EndSpan(ctx, err) }()
	req, sh, err := t.prepareExecuteSql(ctx, stmt, sppb.ExecuteSqlRequest_NORMAL)
	req, sh, err := t.prepareExecuteSQL(ctx, stmt, sppb.ExecuteSqlRequest_NORMAL)
	if err != nil {
		return 0, err
	}
@@ -679,6 +676,64 @@ func (t *ReadWriteTransaction) Update(ctx context.Context, stmt Statement) (rowC
	return extractRowCount(resultSet.Stats)
}

// BatchUpdate groups one or more DML statements and sends them to Spanner in a
// single RPC. This is an efficient way to execute multiple DML statements.
//
// A slice of counts is returned, where each count represents the number of
// affected rows for the given query at the same index. If an error occurs,
// counts will be returned up to the query that encountered the error.
func (t *ReadWriteTransaction) BatchUpdate(ctx context.Context, stmts []Statement) (_ []int64, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.BatchUpdate")
	defer func() { trace.EndSpan(ctx, err) }()

	sh, ts, err := t.acquire(ctx)
	if err != nil {
		return nil, err
	}
	// Cloud Spanner will return "Session not found" on bad sessions.
	sid := sh.getID()
	if sid == "" {
		// Might happen if transaction is closed in the middle of a API call.
		return nil, errSessionClosed(sh)
	}

	var sppbStmts []*sppb.ExecuteBatchDmlRequest_Statement
	for _, st := range stmts {
		params, paramTypes, err := st.convertParams()
		if err != nil {
			return nil, err
		}
		sppbStmts = append(sppbStmts, &sppb.ExecuteBatchDmlRequest_Statement{
			Sql: st.SQL,
			Params: params,
			ParamTypes: paramTypes,
		})
	}

	resp, err := sh.getClient().ExecuteBatchDml(ctx, &sppb.ExecuteBatchDmlRequest{
		Session: sh.getID(),
		Transaction: ts,
		Statements: sppbStmts,
		Seqno: atomic.AddInt64(&t.sequenceNumber, 1),
	})
	if err != nil {
		return nil, err
	}

	var counts []int64
	for _, rs := range resp.ResultSets {
		count, err := extractRowCount(rs.Stats)
		if err != nil {
			return nil, err
		}
		counts = append(counts, count)
	}
	if resp.Status.Code != 0 {
		return counts, spannerErrorf(codes.Code(uint32(resp.Status.Code)), resp.Status.Message)
	}
	return counts, nil
}

// acquire implements txReadEnv.acquire.
func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
	ts := &sppb.TransactionSelector{
@@ -807,7 +862,6 @@ func (t *ReadWriteTransaction) rollback(ctx context.Context) {
	if shouldDropSession(err) {
		t.sh.destroy()
	}
	return
}

// runInTransaction executes f under a read-write transaction context.
@@ -41,7 +41,7 @@ var (
	// InsertStruct or InsertMap. See ExampleCommitTimestamp.
	// This is just a placeholder and the actual value stored in this
	// variable has no meaning.
	CommitTimestamp time.Time = commitTimestamp
	CommitTimestamp = commitTimestamp
	commitTimestamp = time.Unix(0, 0).In(time.FixedZone("CommitTimestamp placeholder", 0xDB))
)

@@ -129,7 +129,7 @@ type NullTime struct {
// String implements Stringer.String for NullTime
func (n NullTime) String() string {
	if !n.Valid {
		return fmt.Sprintf("%s", "<null>")
		return "<null>"
	}
	return fmt.Sprintf("%q", n.Time.Format(time.RFC3339Nano))
}
@@ -143,7 +143,7 @@ type NullDate struct {
// String implements Stringer.String for NullDate
func (n NullDate) String() string {
	if !n.Valid {
		return fmt.Sprintf("%s", "<null>")
		return "<null>"
	}
	return fmt.Sprintf("%q", n.Date)
}
@@ -1487,7 +1487,7 @@ func encodeStruct(v interface{}) (*proto3.Value, *sppb.Type, error) {
			continue
		}

		fname, ok := structTagLookup(sf.Tag, "spanner")
		fname, ok := sf.Tag.Lookup("spanner")
		if !ok {
			fname = sf.Name
		}
@@ -1551,7 +1551,7 @@ func isStructOrArrayOfStructValue(v interface{}) bool {

func isSupportedMutationType(v interface{}) bool {
	switch v.(type) {
	case string, NullString, []string, []NullString,
	case nil, string, NullString, []string, []NullString,
		[]byte, [][]byte,
		int, []int, int64, []int64, NullInt64, []NullInt64,
		bool, []bool, NullBool, []NullBool,
@@ -15,11 +15,11 @@
package storage

import (
	"context"
	"net/http"
	"reflect"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	raw "google.golang.org/api/storage/v1"
)
@@ -121,7 +121,7 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		acls, err = req.Do()
		return err
	})
@@ -134,7 +134,7 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		return req.Do()
	})
}
@@ -144,7 +144,7 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		acls, err = req.Do()
		return err
	})
@@ -162,7 +162,7 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
	}
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		_, err := req.Do()
		return err
	})
@@ -175,7 +175,7 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		return req.Do()
	})
}
@@ -185,7 +185,7 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		acls, err = req.Do()
		return err
	})
@@ -212,7 +212,7 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
	} else {
		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
	}
	a.configureCall(req, ctx)
	a.configureCall(ctx, req)
	return runWithRetry(ctx, func() error {
		_, err := req.Do()
		return err
@@ -222,12 +222,12 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
		a.configureCall(req, ctx)
		a.configureCall(ctx, req)
		return req.Do()
	})
}

func (a *ACLHandle) configureCall(call interface{ Header() http.Header }, ctx context.Context) {
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
@@ -15,6 +15,7 @@
package storage

import (
	"context"
	"fmt"
	"net/http"
	"reflect"
@@ -22,7 +23,6 @@ import (

	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	raw "google.golang.org/api/storage/v1"
@@ -186,6 +186,7 @@ func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
	return req, nil
}

// Update updates a bucket's attributes.
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
	defer func() { trace.EndSpan(ctx, err) }()
@@ -231,6 +232,10 @@ type BucketAttrs struct {
	// ACL is the list of access control rules on the bucket.
	ACL []ACLRule

	// BucketPolicyOnly configures access checks to use only bucket-level IAM
	// policies.
	BucketPolicyOnly BucketPolicyOnly

	// DefaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	DefaultObjectACL []ACLRule
@@ -308,6 +313,21 @@ type BucketAttrs struct {

	// The website configuration.
	Website *BucketWebsite

	// Etag is the HTTP/1.1 Entity tag for the bucket.
	// This field is read-only.
	Etag string
}

// BucketPolicyOnly configures access checks to use only bucket-level IAM
// policies.
type BucketPolicyOnly struct {
	// Enabled specifies whether access checks use only bucket-level IAM
	// policies. Enabled may be disabled until the locked time.
	Enabled bool
	// LockedTime specifies the deadline for changing Enabled from true to
	// false.
	LockedTime time.Time
}

// Lifecycle is the lifecycle configuration for objects in the bucket.
@@ -315,7 +335,7 @@ type Lifecycle struct {
	Rules []LifecycleRule
}

// Retention policy enforces a minimum retention time for all objects
// RetentionPolicy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
@@ -442,7 +462,7 @@ type BucketLogging struct {
	LogObjectPrefix string
}

// Website holds the bucket's website configuration, controlling how the
// BucketWebsite holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
@@ -484,6 +504,8 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
		Encryption: toBucketEncryption(b.Encryption),
		Logging: toBucketLogging(b.Logging),
		Website: toBucketWebsite(b.Website),
		BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration),
		Etag: b.Etag,
	}, nil
}

@@ -508,6 +530,14 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
	if b.RequesterPays {
		bb = &raw.BucketBilling{RequesterPays: true}
	}
	var bktIAM *raw.BucketIamConfiguration
	if b.BucketPolicyOnly.Enabled {
		bktIAM = &raw.BucketIamConfiguration{
			BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		}
	}
	return &raw.Bucket{
		Name: b.Name,
		Location: b.Location,
@@ -523,6 +553,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
		Encryption: b.Encryption.toRawBucketEncryption(),
		Logging: b.Logging.toRawBucketLogging(),
		Website: b.Website.toRawBucketWebsite(),
		IamConfiguration: bktIAM,
	}
}

@@ -557,6 +588,7 @@ type BucketEncryption struct {
	DefaultKMSKeyName string
}

// BucketAttrsToUpdate define the attributes to update during an Update call.
type BucketAttrsToUpdate struct {
	// If set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool
@@ -568,6 +600,10 @@ type BucketAttrsToUpdate struct {
	// newly created objects in this bucket.
	DefaultEventBasedHold optional.Bool

	// BucketPolicyOnly configures access checks to use only bucket-level IAM
	// policies.
	BucketPolicyOnly *BucketPolicyOnly

	// If set, updates the retention policy of the bucket. Using
	// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
	//
@@ -654,6 +690,13 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
			ForceSendFields: []string{"RequesterPays"},
		}
	}
	if ua.BucketPolicyOnly != nil {
		rb.IamConfiguration = &raw.BucketIamConfiguration{
			BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{
				Enabled: ua.BucketPolicyOnly.Enabled,
			},
		}
	}
	if ua.Encryption != nil {
		if ua.Encryption.DefaultKMSKeyName == "" {
			rb.NullFields = append(rb.NullFields, "Encryption")
@@ -973,6 +1016,22 @@ func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
	}
}

func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
	if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled {
		return BucketPolicyOnly{}
	}
	lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime)
	if err != nil {
		return BucketPolicyOnly{
			Enabled: true,
		}
	}
	return BucketPolicyOnly{
		Enabled: true,
		LockedTime: lt,
	}
}

// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
@@ -15,11 +15,11 @@
package storage

import (
	"context"
	"errors"
	"fmt"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	raw "google.golang.org/api/storage/v1"
)

@@ -170,7 +170,7 @@ Errors returned by this client are often of the type [`googleapi.Error`](https:/
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:

	if e, ok := err.(*googleapi.Error); ok {
		if e.Code = 409 { ... }
		if e.Code == 409 { ... }
	}
*/
package storage // import "cloud.google.com/go/storage"

@@ -15,9 +15,10 @@
package storage

import (
	"context"

	"cloud.google.com/go/iam"
	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	raw "google.golang.org/api/storage/v1"
	iampb "google.golang.org/genproto/googleapis/iam/v1"
)

@@ -15,9 +15,10 @@
package storage

import (
	"context"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go"
	gax "github.com/googleapis/gax-go/v2"
	"golang.org/x/net/context"
)

// runWithRetry calls the function until it returns nil or a non-retryable error, or
@@ -1,33 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.7

package storage

import (
	"net/http"
)

func withContext(r *http.Request, _ interface{}) *http.Request {
	// In Go 1.6 and below, ignore the context.
	return r
}

// Go 1.6 doesn't have http.Response.Uncompressed, so we can't know whether the Go
// HTTP stack uncompressed a gzip file. As a good approximation, assume that
// the lack of a Content-Length header means that it did uncompress.
func goHTTPUncompressed(res *http.Response) bool {
	return res.Header.Get("Content-Length") == ""
}
@@ -15,12 +15,12 @@
package storage

import (
	"context"
	"errors"
	"fmt"
	"regexp"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	raw "google.golang.org/api/storage/v1"
)

@@ -15,6 +15,7 @@
package storage

import (
	"context"
	"errors"
	"fmt"
	"hash/crc32"
@@ -28,7 +29,6 @@ import (
	"time"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
)

@@ -107,7 +107,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
	if err != nil {
		return nil, err
	}
	req = withContext(req, ctx)
	req = req.WithContext(ctx)
	if o.userProject != "" {
		req.Header.Set("X-Goog-User-Project", o.userProject)
	}
@@ -202,7 +202,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
		// The problem with the last two cases is that the CRC will not match -- GCS
		// computes it on the compressed contents, but we compute it on the
		// uncompressed contents.
		if length != 0 && !goHTTPUncompressed(res) && !uncompressedByServer(res) {
		if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
			crc, checkCRC = parseCRC32c(res)
		}
	}
@@ -16,6 +16,7 @@ package storage

import (
	"bytes"
	"context"
	"crypto"
	"crypto/rand"
	"crypto/rsa"
@@ -25,7 +26,6 @@ import (
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"reflect"
@@ -36,19 +36,19 @@ import (
	"time"
	"unicode/utf8"

	"cloud.google.com/go/internal/trace"
	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"

	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/trace"
	"cloud.google.com/go/internal/version"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
	raw "google.golang.org/api/storage/v1"
	htransport "google.golang.org/api/transport/http"
)

var (
	// ErrBucketNotExist indicates that the bucket does not exist.
	ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
	// ErrObjectNotExist indicates that the object does not exist.
	ErrObjectNotExist = errors.New("storage: object doesn't exist")
)

@@ -777,6 +777,10 @@ type ObjectAttrs struct {
	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
	// populated.
	Prefix string

	// Etag is the HTTP/1.1 Entity tag for the object.
	// This field is read-only.
	Etag string
}

// convertTime converts a time in RFC3339 format to time.Time.
@@ -829,6 +833,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
		Created: convertTime(o.TimeCreated),
		Deleted: convertTime(o.TimeDeleted),
		Updated: convertTime(o.Updated),
		Etag: o.Etag,
	}
}

@@ -871,17 +876,6 @@ type Query struct {
	Versions bool
}

// contentTyper implements ContentTyper to enable an
// io.ReadCloser to specify its MIME type.
type contentTyper struct {
	io.Reader
	t string
}

func (c *contentTyper) ContentType() string {
	return c.t
}

// Conditions constrain methods to act on specific generations of
// objects.
//
File diff suppressed because one or more lines are too long
@@ -15,6 +15,7 @@
package storage

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
@@ -22,7 +23,6 @@ import (
	"sync"
	"unicode/utf8"

	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	raw "google.golang.org/api/storage/v1"
)
@@ -0,0 +1,24 @@
# How to contribute

We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.

## Contributor License Agreement

Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.

You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.

## Code reviews

All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.

[GitHub Help]: https://help.github.com/articles/about-pull-requests/
@@ -0,0 +1,61 @@
# OpenCensus Agent Go Exporter

[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]

This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from
OpenCensus Library, export them to other backends and possibly push configurations back to
Library. See more details on [OC-Agent Readme][OCAgentReadme].

Note: This is an experimental repository and is likely to get backwards-incompatible changes.
Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].

## Installation

```bash
$ go get -u contrib.go.opencensus.io/exporter/ocagent
```

## Usage

```go
import (
	"context"
	"fmt"
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/trace"
)

func Example() {
	exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
	if err != nil {
		log.Fatalf("Failed to create the agent exporter: %v", err)
	}
	defer exp.Stop()

	// Now register it as a trace exporter.
	trace.RegisterExporter(exp)

	// Then use the OpenCensus tracing library, like we normally would.
	ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
	defer span.End()

	for i := 0; i < 10; i++ {
		_, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
		<-time.After(6 * time.Millisecond)
		iSpan.End()
	}
}
```

[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
@@ -0,0 +1,38 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

import (
	"math/rand"
	"time"
)

var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))

// retries function fn upto n times, if fn returns an error lest it returns nil early.
// It applies exponential backoff in units of (1<<n) + jitter microsends.
func nTriesWithExponentialBackoff(nTries int64, timeBaseUnit time.Duration, fn func() error) (err error) {
	for i := int64(0); i < nTries; i++ {
		err = fn()
		if err == nil {
			return nil
		}
		// Backoff for a time period with a pseudo-random jitter
		jitter := time.Duration(randSrc.Float64()*100) * time.Microsecond
		ts := jitter + ((1 << uint64(i)) * timeBaseUnit)
		<-time.After(ts)
	}
	return err
}
@@ -0,0 +1,101 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

import (
	"math/rand"
	"sync/atomic"
	"time"
)

const (
	sDisconnected int32 = 5 + iota
	sConnected
)

func (ae *Exporter) setStateDisconnected() {
	atomic.StoreInt32(&ae.connectionState, sDisconnected)
	select {
	case ae.disconnectedCh <- true:
	default:
	}
}

func (ae *Exporter) setStateConnected() {
	atomic.StoreInt32(&ae.connectionState, sConnected)
}

func (ae *Exporter) connected() bool {
	return atomic.LoadInt32(&ae.connectionState) == sConnected
}

const defaultConnReattemptPeriod = 10 * time.Second

func (ae *Exporter) indefiniteBackgroundConnection() error {
	defer func() {
		ae.backgroundConnectionDoneCh <- true
	}()

	connReattemptPeriod := ae.reconnectionPeriod
	if connReattemptPeriod <= 0 {
		connReattemptPeriod = defaultConnReattemptPeriod
	}

	// No strong seeding required, nano time can
	// already help with pseudo uniqueness.
	rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))

	// maxJitter: 1 + (70% of the connectionReattemptPeriod)
	maxJitter := int64(1 + 0.7*float64(connReattemptPeriod))

	for {
		// Otherwise these will be the normal scenarios to enable
		// reconnections if we trip out.
		// 1. If we've stopped, return entirely
		// 2. Otherwise block until we are disconnected, and
		// then retry connecting
		select {
		case <-ae.stopCh:
			return errStopped

		case <-ae.disconnectedCh:
			// Normal scenario that we'll wait for
		}

		if err := ae.connect(); err == nil {
			ae.setStateConnected()
		} else {
			ae.setStateDisconnected()
		}

		// Apply some jitter to avoid lockstep retrials of other
		// agent-exporters. Lockstep retrials could result in an
		// innocent DDOS, by clogging the machine's resources and network.
		jitter := time.Duration(rng.Int63n(maxJitter))
		select {
		case <-ae.stopCh:
			return errStopped
		case <-time.After(connReattemptPeriod + jitter):
		}
	}
}

func (ae *Exporter) connect() error {
	cc, err := ae.dialToAgent()
	if err != nil {
		return err
	}
	return ae.enableConnectionStreams(cc)
}
@@ -0,0 +1,10 @@
module contrib.go.opencensus.io/exporter/ocagent

require (
	github.com/census-instrumentation/opencensus-proto v0.2.0 // this is to match the version used in census-instrumentation/opencensus-service
	github.com/golang/protobuf v1.3.1
	github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
	go.opencensus.io v0.20.2
	google.golang.org/api v0.3.1
	google.golang.org/grpc v1.19.1
)
|
||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||||
|
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||||
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||||
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||||
|
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||||
|
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
|
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||||
|
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
|
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||||
|
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||||
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
|
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
|
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||||
|
go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
|
||||||
|
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||||
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
|
||||||
|
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
|
||||||
|
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
|
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
@ -0,0 +1,46 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ocagent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||||
|
"go.opencensus.io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodeWithStartTime creates a node using nodeName and derives:
|
||||||
|
// Hostname from the environment
|
||||||
|
// Pid from the current process
|
||||||
|
// StartTimestamp from the start time of this process
|
||||||
|
// Language and library information.
|
||||||
|
func NodeWithStartTime(nodeName string) *commonpb.Node {
|
||||||
|
return &commonpb.Node{
|
||||||
|
Identifier: &commonpb.ProcessIdentifier{
|
||||||
|
HostName: os.Getenv("HOSTNAME"),
|
||||||
|
Pid: uint32(os.Getpid()),
|
||||||
|
StartTimestamp: timeToTimestamp(startTime),
|
||||||
|
},
|
||||||
|
LibraryInfo: &commonpb.LibraryInfo{
|
||||||
|
Language: commonpb.LibraryInfo_GO_LANG,
|
||||||
|
ExporterVersion: Version,
|
||||||
|
CoreLibraryVersion: opencensus.Version(),
|
||||||
|
},
|
||||||
|
ServiceInfo: &commonpb.ServiceInfo{
|
||||||
|
Name: nodeName,
|
||||||
|
},
|
||||||
|
Attributes: make(map[string]string),
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,501 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ocagent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/api/support/bundler"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
|
||||||
|
"go.opencensus.io/plugin/ocgrpc"
|
||||||
|
"go.opencensus.io/resource"
|
||||||
|
"go.opencensus.io/stats/view"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
|
||||||
|
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||||
|
agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
|
||||||
|
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
|
||||||
|
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||||
|
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||||
|
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var startupMu sync.Mutex
|
||||||
|
var startTime time.Time
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
startupMu.Lock()
|
||||||
|
startTime = time.Now()
|
||||||
|
startupMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ trace.Exporter = (*Exporter)(nil)
|
||||||
|
var _ view.Exporter = (*Exporter)(nil)
|
||||||
|
|
||||||
|
type Exporter struct {
|
||||||
|
connectionState int32
|
||||||
|
|
||||||
|
// mu protects the non-atomic and non-channel variables
|
||||||
|
mu sync.RWMutex
|
||||||
|
// senderMu protects the concurrent unsafe traceExporter client
|
||||||
|
senderMu sync.RWMutex
|
||||||
|
started bool
|
||||||
|
stopped bool
|
||||||
|
agentAddress string
|
||||||
|
serviceName string
|
||||||
|
canDialInsecure bool
|
||||||
|
traceExporter agenttracepb.TraceService_ExportClient
|
||||||
|
metricsExporter agentmetricspb.MetricsService_ExportClient
|
||||||
|
nodeInfo *commonpb.Node
|
||||||
|
grpcClientConn *grpc.ClientConn
|
||||||
|
reconnectionPeriod time.Duration
|
||||||
|
resource *resourcepb.Resource
|
||||||
|
compressor string
|
||||||
|
headers map[string]string
|
||||||
|
|
||||||
|
startOnce sync.Once
|
||||||
|
stopCh chan bool
|
||||||
|
disconnectedCh chan bool
|
||||||
|
|
||||||
|
backgroundConnectionDoneCh chan bool
|
||||||
|
|
||||||
|
traceBundler *bundler.Bundler
|
||||||
|
|
||||||
|
// viewDataBundler is the bundler to enable conversion
|
||||||
|
// from OpenCensus-Go view.Data to metricspb.Metric.
|
||||||
|
// Please do not confuse it with metricsBundler!
|
||||||
|
viewDataBundler *bundler.Bundler
|
||||||
|
|
||||||
|
clientTransportCredentials credentials.TransportCredentials
|
||||||
|
|
||||||
|
grpcDialOptions []grpc.DialOption
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||||
|
exp, err := NewUnstartedExporter(opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := exp.Start(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return exp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const spanDataBufferSize = 300
|
||||||
|
|
||||||
|
func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||||
|
e := new(Exporter)
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.withExporter(e)
|
||||||
|
}
|
||||||
|
traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
|
||||||
|
e.uploadTraces(bundle.([]*trace.SpanData))
|
||||||
|
})
|
||||||
|
traceBundler.DelayThreshold = 2 * time.Second
|
||||||
|
traceBundler.BundleCountThreshold = spanDataBufferSize
|
||||||
|
e.traceBundler = traceBundler
|
||||||
|
|
||||||
|
viewDataBundler := bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
|
||||||
|
e.uploadViewData(bundle.([]*view.Data))
|
||||||
|
})
|
||||||
|
viewDataBundler.DelayThreshold = 2 * time.Second
|
||||||
|
viewDataBundler.BundleCountThreshold = 500 // TODO: (@odeke-em) make this configurable.
|
||||||
|
e.viewDataBundler = viewDataBundler
|
||||||
|
e.nodeInfo = NodeWithStartTime(e.serviceName)
|
||||||
|
e.resource = resourceProtoFromEnv()
|
||||||
|
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxInitialConfigRetries = 10
|
||||||
|
maxInitialTracesRetries = 10
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errAlreadyStarted = errors.New("already started")
|
||||||
|
errNotStarted = errors.New("not started")
|
||||||
|
errStopped = errors.New("stopped")
|
||||||
|
errNoConnection = errors.New("no active connection")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Start dials to the agent, establishing a connection to it. It also
|
||||||
|
// initiates the Config and Trace services by sending over the initial
|
||||||
|
// messages that consist of the node identifier. Start invokes a background
|
||||||
|
// connector that will reattempt connections to the agent periodically
|
||||||
|
// if the connection dies.
|
||||||
|
func (ae *Exporter) Start() error {
|
||||||
|
var err = errAlreadyStarted
|
||||||
|
ae.startOnce.Do(func() {
|
||||||
|
ae.mu.Lock()
|
||||||
|
defer ae.mu.Unlock()
|
||||||
|
|
||||||
|
ae.started = true
|
||||||
|
ae.disconnectedCh = make(chan bool, 1)
|
||||||
|
ae.stopCh = make(chan bool)
|
||||||
|
ae.backgroundConnectionDoneCh = make(chan bool)
|
||||||
|
|
||||||
|
ae.setStateDisconnected()
|
||||||
|
go ae.indefiniteBackgroundConnection()
|
||||||
|
|
||||||
|
err = nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) prepareAgentAddress() string {
|
||||||
|
if ae.agentAddress != "" {
|
||||||
|
return ae.agentAddress
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s:%d", DefaultAgentHost, DefaultAgentPort)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) enableConnectionStreams(cc *grpc.ClientConn) error {
|
||||||
|
ae.mu.RLock()
|
||||||
|
started := ae.started
|
||||||
|
nodeInfo := ae.nodeInfo
|
||||||
|
ae.mu.RUnlock()
|
||||||
|
|
||||||
|
if !started {
|
||||||
|
return errNotStarted
|
||||||
|
}
|
||||||
|
|
||||||
|
ae.mu.Lock()
|
||||||
|
// If the previous clientConn was non-nil, close it
|
||||||
|
if ae.grpcClientConn != nil {
|
||||||
|
_ = ae.grpcClientConn.Close()
|
||||||
|
}
|
||||||
|
ae.grpcClientConn = cc
|
||||||
|
ae.mu.Unlock()
|
||||||
|
|
||||||
|
if err := ae.createTraceServiceConnection(ae.grpcClientConn, nodeInfo); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ae.createMetricsServiceConnection(ae.grpcClientConn, nodeInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||||
|
// Initiate the trace service by sending over node identifier info.
|
||||||
|
traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
|
||||||
|
ctx := context.Background()
|
||||||
|
if len(ae.headers) > 0 {
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||||
|
}
|
||||||
|
traceExporter, err := traceSvcClient.Export(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
|
||||||
|
Node: node,
|
||||||
|
Resource: ae.resource,
|
||||||
|
}
|
||||||
|
if err := traceExporter.Send(firstTraceMessage); err != nil {
|
||||||
|
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ae.mu.Lock()
|
||||||
|
ae.traceExporter = traceExporter
|
||||||
|
ae.mu.Unlock()
|
||||||
|
|
||||||
|
// Initiate the config service by sending over node identifier info.
|
||||||
|
configStream, err := traceSvcClient.Config(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
|
||||||
|
}
|
||||||
|
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
|
||||||
|
if err := configStream.Send(firstCfgMessage); err != nil {
|
||||||
|
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// In the background, handle trace configurations that are beamed down
|
||||||
|
// by the agent, but also reply to it with the applied configuration.
|
||||||
|
go ae.handleConfigStreaming(configStream)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||||
|
metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
|
||||||
|
metricsExporter, err := metricsSvcClient.Export(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
|
||||||
|
}
|
||||||
|
// Initiate the metrics service by sending over the first message just containing the Node and Resource.
|
||||||
|
firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
|
||||||
|
Node: node,
|
||||||
|
Resource: ae.resource,
|
||||||
|
}
|
||||||
|
if err := metricsExporter.Send(firstMetricsMessage); err != nil {
|
||||||
|
return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ae.mu.Lock()
|
||||||
|
ae.metricsExporter = metricsExporter
|
||||||
|
ae.mu.Unlock()
|
||||||
|
|
||||||
|
// With that we are good to go and can start sending metrics
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
|
||||||
|
addr := ae.prepareAgentAddress()
|
||||||
|
var dialOpts []grpc.DialOption
|
||||||
|
if ae.clientTransportCredentials != nil {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
|
||||||
|
} else if ae.canDialInsecure {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithInsecure())
|
||||||
|
}
|
||||||
|
if ae.compressor != "" {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
|
||||||
|
}
|
||||||
|
dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
|
||||||
|
if len(ae.grpcDialOptions) != 0 {
|
||||||
|
dialOpts = append(dialOpts, ae.grpcDialOptions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
if len(ae.headers) > 0 {
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||||
|
}
|
||||||
|
return grpc.DialContext(ctx, addr, dialOpts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
|
||||||
|
// Note: We haven't yet implemented configuration sending so we
|
||||||
|
// should NOT be changing connection states within this function for now.
|
||||||
|
for {
|
||||||
|
recv, err := configStream.Recv()
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Check if this is a transient error or exponential backoff-able.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cfg := recv.Config
|
||||||
|
if cfg == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise now apply the trace configuration sent down from the agent
|
||||||
|
if psamp := cfg.GetProbabilitySampler(); psamp != nil {
|
||||||
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
|
||||||
|
} else if csamp := cfg.GetConstantSampler(); csamp != nil {
|
||||||
|
alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
|
||||||
|
if alwaysSample {
|
||||||
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
} else {
|
||||||
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
|
||||||
|
}
|
||||||
|
} else { // TODO: Add the rate limiting sampler here
|
||||||
|
}
|
||||||
|
|
||||||
|
// Then finally send back to upstream the newly applied configuration
|
||||||
|
err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop shuts down all the connections and resources
|
||||||
|
// related to the exporter.
|
||||||
|
func (ae *Exporter) Stop() error {
|
||||||
|
ae.mu.RLock()
|
||||||
|
cc := ae.grpcClientConn
|
||||||
|
started := ae.started
|
||||||
|
stopped := ae.stopped
|
||||||
|
ae.mu.RUnlock()
|
||||||
|
|
||||||
|
if !started {
|
||||||
|
return errNotStarted
|
||||||
|
}
|
||||||
|
if stopped {
|
||||||
|
// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ae.Flush()
|
||||||
|
|
||||||
|
// Now close the underlying gRPC connection.
|
||||||
|
var err error
|
||||||
|
if cc != nil {
|
||||||
|
err = cc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point we can change the state variables: started and stopped
|
||||||
|
ae.mu.Lock()
|
||||||
|
ae.started = false
|
||||||
|
ae.stopped = true
|
||||||
|
ae.mu.Unlock()
|
||||||
|
close(ae.stopCh)
|
||||||
|
|
||||||
|
// Ensure that the backgroundConnector returns
|
||||||
|
<-ae.backgroundConnectionDoneCh
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
|
||||||
|
if sd == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = ae.traceBundler.Add(sd, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
|
||||||
|
if batch == nil || len(batch.Spans) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ae.stopCh:
|
||||||
|
return errStopped
|
||||||
|
|
||||||
|
default:
|
||||||
|
if !ae.connected() {
|
||||||
|
return errNoConnection
|
||||||
|
}
|
||||||
|
|
||||||
|
ae.senderMu.Lock()
|
||||||
|
err := ae.traceExporter.Send(batch)
|
||||||
|
ae.senderMu.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
ae.setStateDisconnected()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) ExportView(vd *view.Data) {
|
||||||
|
if vd == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = ae.viewDataBundler.Add(vd, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
|
||||||
|
if len(sdl) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
protoSpans := make([]*tracepb.Span, 0, len(sdl))
|
||||||
|
for _, sd := range sdl {
|
||||||
|
if sd != nil {
|
||||||
|
protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return protoSpans
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
|
||||||
|
select {
|
||||||
|
case <-ae.stopCh:
|
||||||
|
return
|
||||||
|
|
||||||
|
default:
|
||||||
|
if !ae.connected() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
protoSpans := ocSpanDataToPbSpans(sdl)
|
||||||
|
if len(protoSpans) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ae.senderMu.Lock()
|
||||||
|
err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
|
||||||
|
Spans: protoSpans,
|
||||||
|
})
|
||||||
|
ae.senderMu.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
ae.setStateDisconnected()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
|
||||||
|
if len(vdl) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
metrics := make([]*metricspb.Metric, 0, len(vdl))
|
||||||
|
for _, vd := range vdl {
|
||||||
|
if vd != nil {
|
||||||
|
vmetric, err := viewDataToMetric(vd)
|
||||||
|
// TODO: (@odeke-em) somehow report this error, if it is non-nil.
|
||||||
|
if err == nil && vmetric != nil {
|
||||||
|
metrics = append(metrics, vmetric)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return metrics
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) uploadViewData(vdl []*view.Data) {
|
||||||
|
select {
|
||||||
|
case <-ae.stopCh:
|
||||||
|
return
|
||||||
|
|
||||||
|
default:
|
||||||
|
if !ae.connected() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
protoMetrics := ocViewDataToPbMetrics(vdl)
|
||||||
|
if len(protoMetrics) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
|
||||||
|
Metrics: protoMetrics,
|
||||||
|
// TODO:(@odeke-em)
|
||||||
|
// a) Figure out how to derive a Node from the environment
|
||||||
|
// b) Figure out how to derive a Resource from the environment
|
||||||
|
// or better letting users of the exporter configure it.
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
ae.setStateDisconnected()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ae *Exporter) Flush() {
|
||||||
|
ae.traceBundler.Flush()
|
||||||
|
ae.viewDataBundler.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceProtoFromEnv() *resourcepb.Resource {
|
||||||
|
rs, _ := resource.FromEnv(context.Background())
|
||||||
|
if rs == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rprs := &resourcepb.Resource{
|
||||||
|
Type: rs.Type,
|
||||||
|
}
|
||||||
|
if rs.Labels != nil {
|
||||||
|
rprs.Labels = make(map[string]string)
|
||||||
|
for k, v := range rs.Labels {
|
||||||
|
rprs.Labels[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rprs
|
||||||
|
}
|
|
@ -0,0 +1,144 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ocagent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
DefaultAgentPort uint16 = 55678
|
||||||
|
DefaultAgentHost string = "localhost"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ExporterOption interface {
|
||||||
|
withExporter(e *Exporter)
|
||||||
|
}
|
||||||
|
|
||||||
|
type insecureGrpcConnection int
|
||||||
|
|
||||||
|
var _ ExporterOption = (*insecureGrpcConnection)(nil)
|
||||||
|
|
||||||
|
func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
|
||||||
|
e.canDialInsecure = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInsecure disables client transport security for the exporter's gRPC connection
|
||||||
|
// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
|
||||||
|
// does. Note, by default, client security is required unless WithInsecure is used.
|
||||||
|
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
|
||||||
|
|
||||||
|
type addressSetter string
|
||||||
|
|
||||||
|
func (as addressSetter) withExporter(e *Exporter) {
|
||||||
|
e.agentAddress = string(as)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ ExporterOption = (*addressSetter)(nil)
|
||||||
|
|
||||||
|
// WithAddress allows one to set the address that the exporter will
|
||||||
|
// connect to the agent on. If unset, it will instead try to use
|
||||||
|
// connect to DefaultAgentHost:DefaultAgentPort
|
||||||
|
func WithAddress(addr string) ExporterOption {
|
||||||
|
return addressSetter(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
type serviceNameSetter string
|
||||||
|
|
||||||
|
func (sns serviceNameSetter) withExporter(e *Exporter) {
|
||||||
|
e.serviceName = string(sns)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ ExporterOption = (*serviceNameSetter)(nil)
|
||||||
|
|
||||||
|
// WithServiceName allows one to set/override the service name
|
||||||
|
// that the exporter will report to the agent.
|
||||||
|
func WithServiceName(serviceName string) ExporterOption {
|
||||||
|
return serviceNameSetter(serviceName)
|
||||||
|
}
|
||||||
|
|
||||||
|
type reconnectionPeriod time.Duration
|
||||||
|
|
||||||
|
func (rp reconnectionPeriod) withExporter(e *Exporter) {
|
||||||
|
e.reconnectionPeriod = time.Duration(rp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithReconnectionPeriod(rp time.Duration) ExporterOption {
|
||||||
|
return reconnectionPeriod(rp)
|
||||||
|
}
|
||||||
|
|
||||||
|
type compressorSetter string
|
||||||
|
|
||||||
|
func (c compressorSetter) withExporter(e *Exporter) {
|
||||||
|
e.compressor = string(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UseCompressor will set the compressor for the gRPC client to use when sending requests.
|
||||||
|
// It is the responsibility of the caller to ensure that the compressor set has been registered
|
||||||
|
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
|
||||||
|
// compressors auto-register on import, such as gzip, which can be registered by calling
|
||||||
|
// `import _ "google.golang.org/grpc/encoding/gzip"`
|
||||||
|
func UseCompressor(compressorName string) ExporterOption {
|
||||||
|
return compressorSetter(compressorName)
|
||||||
|
}
|
||||||
|
|
||||||
|
type headerSetter map[string]string
|
||||||
|
|
||||||
|
func (h headerSetter) withExporter(e *Exporter) {
|
||||||
|
e.headers = map[string]string(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeaders will send the provided headers when the gRPC stream connection
|
||||||
|
// is instantiated
|
||||||
|
func WithHeaders(headers map[string]string) ExporterOption {
|
||||||
|
return headerSetter(headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
type clientCredentials struct {
|
||||||
|
credentials.TransportCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ ExporterOption = (*clientCredentials)(nil)
|
||||||
|
|
||||||
|
// WithTLSCredentials allows the connection to use TLS credentials
|
||||||
|
// when talking to the server. It takes in grpc.TransportCredentials instead
|
||||||
|
// of say a Certificate file or a tls.Certificate, because the retrieving
|
||||||
|
// these credentials can be done in many ways e.g. plain file, in code tls.Config
|
||||||
|
// or by certificate rotation, so it is up to the caller to decide what to use.
|
||||||
|
func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
|
||||||
|
return &clientCredentials{TransportCredentials: creds}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cc *clientCredentials) withExporter(e *Exporter) {
|
||||||
|
e.clientTransportCredentials = cc.TransportCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
type grpcDialOptions []grpc.DialOption
|
||||||
|
|
||||||
|
var _ ExporterOption = (*grpcDialOptions)(nil)
|
||||||
|
|
||||||
|
// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts
|
||||||
|
// with some other configuration the GRPC specified via the agent the ones here will
|
||||||
|
// take preference since they are set last.
|
||||||
|
func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption {
|
||||||
|
return grpcDialOptions(opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (opts grpcDialOptions) withExporter(e *Exporter) {
|
||||||
|
e.grpcDialOptions = opts
|
||||||
|
}
|
248
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
generated
vendored
Normal file
248
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
generated
vendored
Normal file
|
@ -0,0 +1,248 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ocagent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
"go.opencensus.io/trace/tracestate"
|
||||||
|
|
||||||
|
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||||
|
"github.com/golang/protobuf/ptypes/timestamp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxAnnotationEventsPerSpan = 32
|
||||||
|
maxMessageEventsPerSpan = 128
|
||||||
|
)
|
||||||
|
|
||||||
|
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
|
||||||
|
if sd == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var namePtr *tracepb.TruncatableString
|
||||||
|
if sd.Name != "" {
|
||||||
|
namePtr = &tracepb.TruncatableString{Value: sd.Name}
|
||||||
|
}
|
||||||
|
return &tracepb.Span{
|
||||||
|
TraceId: sd.TraceID[:],
|
||||||
|
SpanId: sd.SpanID[:],
|
||||||
|
ParentSpanId: sd.ParentSpanID[:],
|
||||||
|
Status: ocStatusToProtoStatus(sd.Status),
|
||||||
|
StartTime: timeToTimestamp(sd.StartTime),
|
||||||
|
EndTime: timeToTimestamp(sd.EndTime),
|
||||||
|
Links: ocLinksToProtoLinks(sd.Links),
|
||||||
|
Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
|
||||||
|
Name: namePtr,
|
||||||
|
Attributes: ocAttributesToProtoAttributes(sd.Attributes),
|
||||||
|
TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
|
||||||
|
Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var blankStatus trace.Status
|
||||||
|
|
||||||
|
func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
|
||||||
|
if status == blankStatus {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &tracepb.Status{
|
||||||
|
Code: status.Code,
|
||||||
|
Message: status.Message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
|
||||||
|
if len(links) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sl := make([]*tracepb.Span_Link, 0, len(links))
|
||||||
|
for _, ocLink := range links {
|
||||||
|
// This redefinition is necessary to prevent ocLink.*ID[:] copies
|
||||||
|
// being reused -- in short we need a new ocLink per iteration.
|
||||||
|
ocLink := ocLink
|
||||||
|
|
||||||
|
sl = append(sl, &tracepb.Span_Link{
|
||||||
|
TraceId: ocLink.TraceID[:],
|
||||||
|
SpanId: ocLink.SpanID[:],
|
||||||
|
Type: ocLinkTypeToProtoLinkType(ocLink.Type),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &tracepb.Span_Links{
|
||||||
|
Link: sl,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
|
||||||
|
switch oct {
|
||||||
|
case trace.LinkTypeChild:
|
||||||
|
return tracepb.Span_Link_CHILD_LINKED_SPAN
|
||||||
|
case trace.LinkTypeParent:
|
||||||
|
return tracepb.Span_Link_PARENT_LINKED_SPAN
|
||||||
|
default:
|
||||||
|
return tracepb.Span_Link_TYPE_UNSPECIFIED
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
|
||||||
|
if len(attrs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
outMap := make(map[string]*tracepb.AttributeValue)
|
||||||
|
for k, v := range attrs {
|
||||||
|
switch v := v.(type) {
|
||||||
|
case bool:
|
||||||
|
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
|
||||||
|
|
||||||
|
case int:
|
||||||
|
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
|
||||||
|
|
||||||
|
case int64:
|
||||||
|
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
|
||||||
|
|
||||||
|
case string:
|
||||||
|
outMap[k] = &tracepb.AttributeValue{
|
||||||
|
Value: &tracepb.AttributeValue_StringValue{
|
||||||
|
StringValue: &tracepb.TruncatableString{Value: v},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &tracepb.Span_Attributes{
|
||||||
|
AttributeMap: outMap,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This code is mostly copied from
|
||||||
|
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
|
||||||
|
func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
|
||||||
|
if len(as) == 0 && len(es) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
timeEvents := &tracepb.Span_TimeEvents{}
|
||||||
|
var annotations, droppedAnnotationsCount int
|
||||||
|
var messageEvents, droppedMessageEventsCount int
|
||||||
|
|
||||||
|
// Transform annotations
|
||||||
|
for i, a := range as {
|
||||||
|
if annotations >= maxAnnotationEventsPerSpan {
|
||||||
|
droppedAnnotationsCount = len(as) - i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
annotations++
|
||||||
|
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||||
|
&tracepb.Span_TimeEvent{
|
||||||
|
Time: timeToTimestamp(a.Time),
|
||||||
|
Value: transformAnnotationToTimeEvent(&a),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform message events
|
||||||
|
for i, e := range es {
|
||||||
|
if messageEvents >= maxMessageEventsPerSpan {
|
||||||
|
droppedMessageEventsCount = len(es) - i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
messageEvents++
|
||||||
|
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||||
|
&tracepb.Span_TimeEvent{
|
||||||
|
Time: timeToTimestamp(e.Time),
|
||||||
|
Value: transformMessageEventToTimeEvent(&e),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process dropped counter
|
||||||
|
timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
|
||||||
|
timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
|
||||||
|
|
||||||
|
return timeEvents
|
||||||
|
}
|
||||||
|
|
||||||
|
func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
|
||||||
|
return &tracepb.Span_TimeEvent_Annotation_{
|
||||||
|
Annotation: &tracepb.Span_TimeEvent_Annotation{
|
||||||
|
Description: &tracepb.TruncatableString{Value: a.Message},
|
||||||
|
Attributes: ocAttributesToProtoAttributes(a.Attributes),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
|
||||||
|
return &tracepb.Span_TimeEvent_MessageEvent_{
|
||||||
|
MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
|
||||||
|
Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
|
||||||
|
Id: uint64(e.MessageID),
|
||||||
|
UncompressedSize: uint64(e.UncompressedByteSize),
|
||||||
|
CompressedSize: uint64(e.CompressedByteSize),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// clip32 clips an int to the range of an int32.
|
||||||
|
func clip32(x int) int32 {
|
||||||
|
if x < math.MinInt32 {
|
||||||
|
return math.MinInt32
|
||||||
|
}
|
||||||
|
if x > math.MaxInt32 {
|
||||||
|
return math.MaxInt32
|
||||||
|
}
|
||||||
|
return int32(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
|
||||||
|
nanoTime := t.UnixNano()
|
||||||
|
return ×tamp.Timestamp{
|
||||||
|
Seconds: nanoTime / 1e9,
|
||||||
|
Nanos: int32(nanoTime % 1e9),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
|
||||||
|
switch kind {
|
||||||
|
case trace.SpanKindClient:
|
||||||
|
return tracepb.Span_CLIENT
|
||||||
|
case trace.SpanKindServer:
|
||||||
|
return tracepb.Span_SERVER
|
||||||
|
default:
|
||||||
|
return tracepb.Span_SPAN_KIND_UNSPECIFIED
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
|
||||||
|
if ts == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &tracepb.Span_Tracestate{
|
||||||
|
Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
|
||||||
|
protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
|
||||||
|
for _, entry := range entries {
|
||||||
|
protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
|
||||||
|
Key: entry.Key,
|
||||||
|
Value: entry.Value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return protoEntries
|
||||||
|
}
|
274
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
generated
vendored
Normal file
274
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
generated
vendored
Normal file
|
@ -0,0 +1,274 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ocagent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/stats"
|
||||||
|
"go.opencensus.io/stats/view"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/ptypes/timestamp"
|
||||||
|
|
||||||
|
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errNilMeasure = errors.New("expecting a non-nil stats.Measure")
|
||||||
|
errNilView = errors.New("expecting a non-nil view.View")
|
||||||
|
errNilViewData = errors.New("expecting a non-nil view.Data")
|
||||||
|
)
|
||||||
|
|
||||||
|
func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
|
||||||
|
if vd == nil {
|
||||||
|
return nil, errNilViewData
|
||||||
|
}
|
||||||
|
|
||||||
|
descriptor, err := viewToMetricDescriptor(vd.View)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
timeseries, err := viewDataToTimeseries(vd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
metric := &metricspb.Metric{
|
||||||
|
MetricDescriptor: descriptor,
|
||||||
|
Timeseries: timeseries,
|
||||||
|
}
|
||||||
|
return metric, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
|
||||||
|
if v == nil {
|
||||||
|
return nil, errNilView
|
||||||
|
}
|
||||||
|
if v.Measure == nil {
|
||||||
|
return nil, errNilMeasure
|
||||||
|
}
|
||||||
|
|
||||||
|
desc := &metricspb.MetricDescriptor{
|
||||||
|
Name: stringOrCall(v.Name, v.Measure.Name),
|
||||||
|
Description: stringOrCall(v.Description, v.Measure.Description),
|
||||||
|
Unit: v.Measure.Unit(),
|
||||||
|
Type: aggregationToMetricDescriptorType(v),
|
||||||
|
LabelKeys: tagKeysToLabelKeys(v.TagKeys),
|
||||||
|
}
|
||||||
|
return desc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringOrCall(first string, call func() string) string {
|
||||||
|
if first != "" {
|
||||||
|
return first
|
||||||
|
}
|
||||||
|
return call()
|
||||||
|
}
|
||||||
|
|
||||||
|
type measureType uint
|
||||||
|
|
||||||
|
const (
|
||||||
|
measureUnknown measureType = iota
|
||||||
|
measureInt64
|
||||||
|
measureFloat64
|
||||||
|
)
|
||||||
|
|
||||||
|
func measureTypeFromMeasure(m stats.Measure) measureType {
|
||||||
|
switch m.(type) {
|
||||||
|
default:
|
||||||
|
return measureUnknown
|
||||||
|
case *stats.Float64Measure:
|
||||||
|
return measureFloat64
|
||||||
|
case *stats.Int64Measure:
|
||||||
|
return measureInt64
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
|
||||||
|
if v == nil || v.Aggregation == nil {
|
||||||
|
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||||
|
}
|
||||||
|
if v.Measure == nil {
|
||||||
|
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.Aggregation.Type {
|
||||||
|
case view.AggTypeCount:
|
||||||
|
// Cumulative on int64
|
||||||
|
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||||
|
|
||||||
|
case view.AggTypeDistribution:
|
||||||
|
// Cumulative types
|
||||||
|
return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
|
||||||
|
|
||||||
|
case view.AggTypeLastValue:
|
||||||
|
// Gauge types
|
||||||
|
switch measureTypeFromMeasure(v.Measure) {
|
||||||
|
case measureFloat64:
|
||||||
|
return metricspb.MetricDescriptor_GAUGE_DOUBLE
|
||||||
|
case measureInt64:
|
||||||
|
return metricspb.MetricDescriptor_GAUGE_INT64
|
||||||
|
}
|
||||||
|
|
||||||
|
case view.AggTypeSum:
|
||||||
|
// Cumulative types
|
||||||
|
switch measureTypeFromMeasure(v.Measure) {
|
||||||
|
case measureFloat64:
|
||||||
|
return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
|
||||||
|
case measureInt64:
|
||||||
|
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For all other cases, return unspecified.
|
||||||
|
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||||
|
}
|
||||||
|
|
||||||
|
func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
|
||||||
|
labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
|
||||||
|
for _, tagKey := range tagKeys {
|
||||||
|
labelKeys = append(labelKeys, &metricspb.LabelKey{
|
||||||
|
Key: tagKey.Name(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return labelKeys
|
||||||
|
}
|
||||||
|
|
||||||
|
func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
|
||||||
|
if vd == nil || len(vd.Rows) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Given that view.Data only contains Start, End
|
||||||
|
// the timestamps for all the row data will be the exact same
|
||||||
|
// per aggregation. However, the values will differ.
|
||||||
|
// Each row has its own tags.
|
||||||
|
startTimestamp := timeToProtoTimestamp(vd.Start)
|
||||||
|
endTimestamp := timeToProtoTimestamp(vd.End)
|
||||||
|
|
||||||
|
mType := measureTypeFromMeasure(vd.View.Measure)
|
||||||
|
timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
|
||||||
|
// It is imperative that the ordering of "LabelValues" matches those
|
||||||
|
// of the Label keys in the metric descriptor.
|
||||||
|
for _, row := range vd.Rows {
|
||||||
|
labelValues := labelValuesFromTags(row.Tags)
|
||||||
|
point := rowToPoint(vd.View, row, endTimestamp, mType)
|
||||||
|
timeseries = append(timeseries, &metricspb.TimeSeries{
|
||||||
|
StartTimestamp: startTimestamp,
|
||||||
|
LabelValues: labelValues,
|
||||||
|
Points: []*metricspb.Point{point},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(timeseries) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return timeseries, nil
|
||||||
|
}
|
||||||
|
|
||||||
func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
	unixNano := t.UnixNano()
	return &timestamp.Timestamp{
		Seconds: int64(unixNano / 1e9),
		Nanos:   int32(unixNano % 1e9),
	}
}

func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
	pt := &metricspb.Point{
		Timestamp: endTimestamp,
	}

	switch data := row.Data.(type) {
	case *view.CountData:
		pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}

	case *view.DistributionData:
		pt.Value = &metricspb.Point_DistributionValue{
			DistributionValue: &metricspb.DistributionValue{
				Count: data.Count,
				Sum:   float64(data.Count) * data.Mean, // because Mean := Sum/Count
				// TODO: Add Exemplar
				Buckets: bucketsToProtoBuckets(data.CountPerBucket),
				BucketOptions: &metricspb.DistributionValue_BucketOptions{
					Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
						Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
							Bounds: v.Aggregation.Buckets,
						},
					},
				},
				SumOfSquaredDeviation: data.SumOfSquaredDev,
			}}

	case *view.LastValueData:
		setPointValue(pt, data.Value, mType)

	case *view.SumData:
		setPointValue(pt, data.Value, mType)
	}

	return pt
}

// Not returning anything from this function because metricspb.Point.is_Value is an unexported
// interface hence we just have to set its value by pointer.
func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
	if mType == measureInt64 {
		pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
	} else {
		pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
	}
}

func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
	distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
	for i := 0; i < len(countPerBucket); i++ {
		count := countPerBucket[i]

		distBuckets[i] = &metricspb.DistributionValue_Bucket{
			Count: count,
		}
	}

	return distBuckets
}

func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
	if len(tags) == 0 {
		return nil
	}

	labelValues := make([]*metricspb.LabelValue, 0, len(tags))
	for _, tag_ := range tags {
		labelValues = append(labelValues, &metricspb.LabelValue{
			Value: tag_.Value,

			// It is imperative that we set the "HasValue" attribute,
			// in order to distinguish missing a label from the empty string.
			// https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
			//
			// OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
			// so the best case that we can use to distinguish missing labels/tags from the
			// empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
			// a value.
			HasValue: tag_.Key.Name() != "",
		})
	}
	return labelValues
}
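The helpers above are internal to the ocagent exporter; as a standalone illustration (not part of the vendored package), the seconds/nanos split used by timeToProtoTimestamp is just the usual UnixNano decomposition:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same arithmetic as timeToProtoTimestamp: split nanoseconds since the Unix
	// epoch into whole seconds and the remaining nanosecond fraction.
	t := time.Unix(3, 500000000)
	unixNano := t.UnixNano()
	fmt.Println(unixNano/1e9, unixNano%1e9) // 3 500000000
}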

@ -12,17 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package internal // import "go.opencensus.io/stats/internal"
package ocagent

const (
const Version = "0.0.1"
	MaxNameLength = 255
)

func IsPrintable(str string) bool {
	for _, r := range str {
		if !(r >= ' ' && r <= '~') {
			return false
		}
	}
	return true
}
@ -1134,7 +1134,7 @@ type DataDisk struct {
	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
	// CreateOption - Specifies how the virtual machine should be created.<br><br> Possible values are:<br><br> **Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.<br><br> **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'FromImage', 'Empty', 'Attach'
	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
	// ManagedDisk - The managed disk parameters.
	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`

@ -2077,7 +2077,7 @@ type OSDisk struct {
	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
	// CreateOption - Specifies how the virtual machine should be created.<br><br> Possible values are:<br><br> **Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.<br><br> **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'FromImage', 'Empty', 'Attach'
	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
	// ManagedDisk - The managed disk parameters.
	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`

@ -3794,7 +3794,7 @@ type VirtualMachineProperties struct {
	// ProvisioningState - The provisioning state, which only appears in the response.
	ProvisioningState *string `json:"provisioningState,omitempty"`
	// InstanceView - The virtual machine instance view.
	InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"`
	InstanceView *VirtualMachineScaleSetVMInstanceView `json:"instanceView,omitempty"`
	// LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system. <br><br> Possible values are: <br><br> Windows_Client <br><br> Windows_Server <br><br> If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. <br><br> For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Minimum api-version: 2015-06-15
	LicenseType *string `json:"licenseType,omitempty"`
	// VMID - Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands.

@ -3978,7 +3978,7 @@ type VirtualMachineScaleSetDataDisk struct {
	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
	// CreateOption - The create option. Possible values include: 'FromImage', 'Empty', 'Attach'
	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
	// ManagedDisk - The managed disk parameters.
	ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`

@ -6205,7 +6205,7 @@ type VirtualMachineScaleSetVMProperties struct {
	// VMID - Azure VM unique ID.
	VMID *string `json:"vmId,omitempty"`
	// InstanceView - The virtual machine instance view.
	InstanceView *VirtualMachineScaleSetVMInstanceView `json:"instanceView,omitempty"`
	InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"`
	// HardwareProfile - Specifies the hardware settings for the virtual machine.
	HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"`
	// StorageProfile - Specifies the storage settings for the virtual machine disks.
2037 vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go generated vendored
File diff suppressed because it is too large
176 vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2.go generated vendored
@ -1,176 +0,0 @@
package graphrbac

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"net/http"
)

// OAuth2Client is the the Graph RBAC Management Client
type OAuth2Client struct {
	BaseClient
}

// NewOAuth2Client creates an instance of the OAuth2Client client.
func NewOAuth2Client(tenantID string) OAuth2Client {
	return NewOAuth2ClientWithBaseURI(DefaultBaseURI, tenantID)
}

// NewOAuth2ClientWithBaseURI creates an instance of the OAuth2Client client.
func NewOAuth2ClientWithBaseURI(baseURI string, tenantID string) OAuth2Client {
	return OAuth2Client{NewWithBaseURI(baseURI, tenantID)}
}

// Get queries OAuth2 permissions for the relevant SP ObjectId of an app.
// Parameters:
// filter - this is the Service Principal ObjectId associated with the app
func (client OAuth2Client) Get(ctx context.Context, filter string) (result Permissions, err error) {
	req, err := client.GetPreparer(ctx, filter)
	if err != nil {
		err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client OAuth2Client) GetPreparer(ctx context.Context, filter string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"tenantID": autorest.Encode("path", client.TenantID),
	}

	const APIVersion = "1.6"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	if len(filter) > 0 {
		queryParameters["$filter"] = autorest.Encode("query", filter)
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/{tenantID}/oauth2PermissionGrants", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2Client) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client OAuth2Client) GetResponder(resp *http.Response) (result Permissions, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Post grants OAuth2 permissions for the relevant resource Ids of an app.
// Parameters:
// body - the relevant app Service Principal Object Id and the Service Principal Objecit Id you want to grant.
func (client OAuth2Client) Post(ctx context.Context, body *Permissions) (result Permissions, err error) {
	req, err := client.PostPreparer(ctx, body)
	if err != nil {
		err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Post", nil, "Failure preparing request")
		return
	}

	resp, err := client.PostSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Post", resp, "Failure sending request")
		return
	}

	result, err = client.PostResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Post", resp, "Failure responding to request")
	}

	return
}

// PostPreparer prepares the Post request.
func (client OAuth2Client) PostPreparer(ctx context.Context, body *Permissions) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"tenantID": autorest.Encode("path", client.TenantID),
	}

	const APIVersion = "1.6"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/{tenantID}/oauth2PermissionGrants", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if body != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithJSON(body))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// PostSender sends the Post request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2Client) PostSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// PostResponder handles the response to the Post request. The method always
// closes the http.Response Body.
func (client OAuth2Client) PostResponder(resp *http.Response) (result Permissions, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
@ -178,7 +178,7 @@ func (client ServicePrincipalsClient) DeleteResponder(resp *http.Response) (resu
	return
}

// Get gets service principal information from the directory. Query by objectId or pass a filter to query by appId
// Get gets service principal information from the directory.
// Parameters:
// objectID - the object ID of the service principal to get.
func (client ServicePrincipalsClient) Get(ctx context.Context, objectID string) (result ServicePrincipal, err error) {
@ -21,7 +21,6 @@ import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/validation"
	"net/http"
)

@ -53,15 +52,6 @@ func NewRoleAssignmentsClientWithBaseURI(baseURI string, subscriptionID string)
// roleAssignmentName - the name of the role assignment to create. It can be any valid GUID.
// parameters - parameters for the role assignment.
func (client RoleAssignmentsClient) Create(ctx context.Context, scope string, roleAssignmentName string, parameters RoleAssignmentCreateParameters) (result RoleAssignment, err error) {
	if err := validation.Validate([]validation.Validation{
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties", Name: validation.Null, Rule: true,
				Chain: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties.RoleDefinitionID", Name: validation.Null, Rule: true, Chain: nil},
					{Target: "parameters.RoleAssignmentProperties.PrincipalID", Name: validation.Null, Rule: true, Chain: nil},
				}}}}}); err != nil {
		return result, validation.NewError("authorization.RoleAssignmentsClient", "Create", err.Error())
	}

	req, err := client.CreatePreparer(ctx, scope, roleAssignmentName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Create", nil, "Failure preparing request")

@ -130,15 +120,6 @@ func (client RoleAssignmentsClient) CreateResponder(resp *http.Response) (result
// roleID - the ID of the role assignment to create.
// parameters - parameters for the role assignment.
func (client RoleAssignmentsClient) CreateByID(ctx context.Context, roleID string, parameters RoleAssignmentCreateParameters) (result RoleAssignment, err error) {
	if err := validation.Validate([]validation.Validation{
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties", Name: validation.Null, Rule: true,
				Chain: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties.RoleDefinitionID", Name: validation.Null, Rule: true, Chain: nil},
					{Target: "parameters.RoleAssignmentProperties.PrincipalID", Name: validation.Null, Rule: true, Chain: nil},
				}}}}}); err != nil {
		return result, validation.NewError("authorization.RoleAssignmentsClient", "CreateByID", err.Error())
	}

	req, err := client.CreateByIDPreparer(ctx, roleID, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "CreateByID", nil, "Failure preparing request")
@ -169,7 +169,7 @@ func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// Number contains the semantic version of this SDK.
const Number = "v21.2.0"
const Number = "v19.1.0"
@ -34,7 +34,7 @@ import (
	"time"

	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/version"
	"github.com/Azure/go-autorest/tracing"
	"github.com/dgrijalva/jwt-go"
)

@ -385,8 +385,13 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
	if err != nil {
		return err
	}
	// Don't override the refreshLock or the sender if those have been already set.
	if spt.refreshLock == nil {
	spt.refreshLock = &sync.RWMutex{}
	spt.sender = &http.Client{}
	}
	if spt.sender == nil {
		spt.sender = &http.Client{Transport: tracing.Transport}
	}
	return nil
}

@ -433,7 +438,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
			RefreshWithin: defaultRefresh,
		},
		refreshLock: &sync.RWMutex{},
		sender: &http.Client{},
		sender: &http.Client{Transport: tracing.Transport},
		refreshCallbacks: callbacks,
	}
	return spt, nil

@ -674,7 +679,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
			RefreshWithin: defaultRefresh,
		},
		refreshLock: &sync.RWMutex{},
		sender: &http.Client{},
		sender: &http.Client{Transport: tracing.Transport},
		refreshCallbacks: callbacks,
		MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
	}

@ -791,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
	if err != nil {
		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
	}
	req.Header.Add("User-Agent", version.UserAgent())
	req.Header.Add("User-Agent", UserAgent())
	req = req.WithContext(ctx)
	if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
		v := url.Values{}
@ -1,4 +1,9 @@
package version
package adal

import (
	"fmt"
	"runtime"
)

// Copyright 2017 Microsoft Corporation
//

@ -14,24 +19,27 @@ package version
// See the License for the specific language governing permissions and
// limitations under the License.

import (
const number = "v1.0.0"
	"fmt"
	"runtime"
)

// Number contains the semantic version of this SDK.
const Number = "v11.1.1"

var (
	userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
	ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		Number,
		number,
	)
)

// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version.
// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
func UserAgent() string {
	return userAgent
	return ua
}

// AddToUserAgent adds an extension to the current user agent
func AddToUserAgent(extension string) error {
	if extension != "" {
		ua = fmt.Sprintf("%s %s", ua, extension)
		return nil
	}
	return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
}
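For reference, a minimal sketch of how the adal user-agent helpers introduced above can be used from application code (assuming the import path github.com/Azure/go-autorest/autorest/adal):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Append an application-specific suffix to the default adal user agent string.
	if err := adal.AddToUserAgent("myapp/0.1.0"); err != nil {
		fmt.Println("could not extend user agent:", err)
	}
	// Prints something like "Go/go1.x (amd64-linux) go-autorest/adal/v1.0.0 myapp/0.1.0".
	fmt.Println(adal.UserAgent())
}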
@ -15,12 +15,14 @@ package autorest
// limitations under the License.

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/tracing"
)

const (

@ -30,6 +32,8 @@ const (
	apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
	bingAPISdkHeader = "X-BingApis-SDK-Client"
	golangBingAPISdkHeaderValue = "Go-SDK"
	authorization = "Authorization"
	basic = "Basic"
)

// Authorizer is the interface that provides a PrepareDecorator used to supply request

@ -68,7 +72,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str
	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))

@ -147,7 +151,7 @@ type BearerAuthorizerCallback struct {
// is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
	if sender == nil {
		sender = &http.Client{}
		sender = &http.Client{Transport: tracing.Transport}
	}
	return &BearerAuthorizerCallback{sender: sender, callback: callback}
}

@ -257,3 +261,27 @@ func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
	}
	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}

// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
type BasicAuthorizer struct {
	userName string
	password string
}

// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
	return &BasicAuthorizer{
		userName: userName,
		password: password,
	}
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Basic " followed by the base64-encoded username:password tuple.
func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
	headers := make(map[string]interface{})
	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))

	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}
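A minimal sketch of wiring the new BasicAuthorizer into a client; the autorest.Client Authorizer field and NewClientWithUserAgent helper are assumed from the autorest package, and the credentials are placeholders:

package main

import (
	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Requests prepared through this client then carry an
	// "Authorization: Basic <base64(user:pass)>" header.
	client := autorest.NewClientWithUserAgent("example-agent")
	client.Authorizer = autorest.NewBasicAuthorizer("user", "s3cret")
	_ = client
}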
@ -26,6 +26,7 @@ import (
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/tracing"
)

const (

@ -86,7 +87,23 @@ func (f Future) PollingMethod() PollingMethodType {
}

// Done queries the service to see if the operation has completed.
// Deprecated: Use DoneWithContext()
func (f *Future) Done(sender autorest.Sender) (bool, error) {
	return f.DoneWithContext(context.Background(), sender)
}

// DoneWithContext queries the service to see if the operation has completed.
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
	defer func() {
		sc := -1
		resp := f.Response()
		if resp != nil {
			sc = resp.StatusCode
		}
		tracing.EndSpan(ctx, sc, err)
	}()

	// support for legacy Future implementation
	if f.req != nil {
		resp, err := sender.Do(f.req)

@ -107,7 +124,7 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
	if f.pt.hasTerminated() {
		return true, f.pt.pollingError()
	}
	if err := f.pt.pollForStatus(sender); err != nil {
	if err := f.pt.pollForStatus(ctx, sender); err != nil {
		return false, err
	}
	if err := f.pt.checkForErrors(); err != nil {

@ -164,15 +181,31 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error {
// If no deadline is specified in the context then the client.PollingDuration will be
	if d := client.PollingDuration; d != 0 {
// used to determine if a default deadline should be used.
// If PollingDuration is greater than zero the value will be used as the context's timeout.
// If PollingDuration is zero then no default deadline will be used.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
	defer func() {
		sc := -1
		resp := f.Response()
		if resp != nil {
			sc = resp.StatusCode
		}
		tracing.EndSpan(ctx, sc, err)
	}()
	cancelCtx := ctx
	// if the provided context already has a deadline don't override it
	_, hasDeadline := ctx.Deadline()
	if d := client.PollingDuration; !hasDeadline && d != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, d)
		cancelCtx, cancel = context.WithTimeout(ctx, d)
		defer cancel()
	}

	done, err := f.Done(client)
	done, err := f.DoneWithContext(ctx, client)
	for attempts := 0; !done; done, err = f.Done(client) {
	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
		if attempts >= client.RetryAttempts {
			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
		}

@ -196,12 +229,12 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
		attempts++
	}
	// wait until the delay elapses or the context is cancelled
	delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
	delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
	if !delayElapsed {
		return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
		return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
	}
	}
	return err
	return
}

// MarshalJSON implements the json.Marshaler interface.

@ -286,7 +319,7 @@ type pollingTracker interface {
	initializeState() error

	// makes an HTTP request to check the status of the LRO
	pollForStatus(sender autorest.Sender) error
	pollForStatus(ctx context.Context, sender autorest.Sender) error

	// updates internal tracker state, call this after each call to pollForStatus
	updatePollingState(provStateApl bool) error

@ -400,6 +433,10 @@ func (pt *pollingTrackerBase) updateRawBody() error {
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
	}
	// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
	if len(b) == 0 {
		return nil
	}
	// put the body back so it's available to other callers
	pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
	if err = json.Unmarshal(b, &pt.rawBody); err != nil {

@ -409,15 +446,13 @@ func (pt *pollingTrackerBase) updateRawBody() error {
	return nil
}

func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
	req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
	}
	// attach the context from the original request if available (it will be absent for deserialized futures)
	if pt.resp != nil {
	req = req.WithContext(ctx)
		req = req.WithContext(pt.resp.Request.Context())
	}
	pt.resp, err = sender.Do(req)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")

@ -446,7 +481,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
	re := respErr{}
	defer pt.resp.Body.Close()
	var b []byte
	if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
	if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
		goto Default
	}
	if err = json.Unmarshal(b, &re); err != nil {

@ -664,7 +699,7 @@ func (pt *pollingTrackerPatch) updatePollingMethod() error {
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	// note the absense of the "final GET" mechanism for PATCH
	// note the absence of the "final GET" mechanism for PATCH
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {

@ -795,8 +830,6 @@ func (pt *pollingTrackerPut) updatePollingMethod() error {
			pt.URI = lh
			pt.Pm = PollingLocation
		}
		// when both headers are returned we use the value in the Location header for the final GET
		pt.FinalGetURI = lh
	}
	// make sure a polling URL was found
	if pt.URI == "" {
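A hedged sketch of how callers drive the new context-aware polling; azure.Future and autorest.Client come from the packages changed above, while the helper and program around them are illustrative only:

package main

import (
	"context"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitLRO blocks until the long-running operation tracked by f finishes. A deadline
// already present on ctx is respected; otherwise a non-zero client.PollingDuration
// bounds the wait, per the WaitForCompletionRef change above.
func waitLRO(ctx context.Context, f *azure.Future, client autorest.Client) error {
	return f.WaitForCompletionRef(ctx, client)
}

func main() {}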
@ -36,22 +36,37 @@ import (
	"golang.org/x/crypto/pkcs12"
)

// The possible keys in the Values map.
const (
	SubscriptionID = "AZURE_SUBSCRIPTION_ID"
	TenantID = "AZURE_TENANT_ID"
	ClientID = "AZURE_CLIENT_ID"
	ClientSecret = "AZURE_CLIENT_SECRET"
	CertificatePath = "AZURE_CERTIFICATE_PATH"
	CertificatePassword = "AZURE_CERTIFICATE_PASSWORD"
	Username = "AZURE_USERNAME"
	Password = "AZURE_PASSWORD"
	EnvironmentName = "AZURE_ENVIRONMENT"
	Resource = "AZURE_AD_RESOURCE"
	ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint"
	ResourceManagerEndpoint = "ResourceManagerEndpoint"
	GraphResourceID = "GraphResourceID"
	SQLManagementEndpoint = "SQLManagementEndpoint"
	GalleryEndpoint = "GalleryEndpoint"
	ManagementEndpoint = "ManagementEndpoint"
)

// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order:
// 1. Client credentials
// 2. Client certificate
// 3. Username password
// 4. MSI
func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
	settings, err := getAuthenticationSettings()
	settings, err := GetSettingsFromEnvironment()
	if err != nil {
		return nil, err
	}
	return settings.GetAuthorizer()
	if settings.resource == "" {
		settings.resource = settings.environment.ResourceManagerEndpoint
	}

	return settings.getAuthorizer()
}

// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order:

@ -60,126 +75,197 @@ func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
// 3. Username password
// 4. MSI
func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {
	settings, err := getAuthenticationSettings()
	settings, err := GetSettingsFromEnvironment()
	if err != nil {
		return nil, err
	}
	settings.resource = resource
	settings.Values[Resource] = resource
	return settings.getAuthorizer()
	return settings.GetAuthorizer()
}

type settings struct {
// EnvironmentSettings contains the available authentication settings.
	tenantID string
type EnvironmentSettings struct {
	clientID string
	Values map[string]string
	clientSecret string
	Environment azure.Environment
	certificatePath string
	certificatePassword string
	username string
	password string
	envName string
	resource string
	environment azure.Environment
}

func getAuthenticationSettings() (s settings, err error) {
// GetSettingsFromEnvironment returns the available authentication settings from the environment.
	s = settings{
func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) {
	tenantID: os.Getenv("AZURE_TENANT_ID"),
	s = EnvironmentSettings{
	clientID: os.Getenv("AZURE_CLIENT_ID"),
	Values: map[string]string{},
	clientSecret: os.Getenv("AZURE_CLIENT_SECRET"),
	certificatePath: os.Getenv("AZURE_CERTIFICATE_PATH"),
	certificatePassword: os.Getenv("AZURE_CERTIFICATE_PASSWORD"),
	username: os.Getenv("AZURE_USERNAME"),
	password: os.Getenv("AZURE_PASSWORD"),
	envName: os.Getenv("AZURE_ENVIRONMENT"),
	resource: os.Getenv("AZURE_AD_RESOURCE"),
	}
	s.setValue(SubscriptionID)
	if s.envName == "" {
	s.setValue(TenantID)
	s.environment = azure.PublicCloud
	s.setValue(ClientID)
	s.setValue(ClientSecret)
	s.setValue(CertificatePath)
	s.setValue(CertificatePassword)
	s.setValue(Username)
	s.setValue(Password)
	s.setValue(EnvironmentName)
	s.setValue(Resource)
	if v := s.Values[EnvironmentName]; v == "" {
	s.Environment = azure.PublicCloud
	} else {
	s.environment, err = azure.EnvironmentFromName(s.envName)
	s.Environment, err = azure.EnvironmentFromName(v)
	}
	if s.Values[Resource] == "" {
	s.Values[Resource] = s.Environment.ResourceManagerEndpoint
	}
	return
}

func (settings settings) getAuthorizer() (autorest.Authorizer, error) {
// GetSubscriptionID returns the available subscription ID or an empty string.
func (settings EnvironmentSettings) GetSubscriptionID() string {
	return settings.Values[SubscriptionID]
}

// adds the specified environment variable value to the Values map if it exists
func (settings EnvironmentSettings) setValue(key string) {
	if v := os.Getenv(key); v != "" {
		settings.Values[key] = v
	}
}

// helper to return client and tenant IDs
func (settings EnvironmentSettings) getClientAndTenant() (string, string) {
	clientID := settings.Values[ClientID]
	tenantID := settings.Values[TenantID]
	return clientID, tenantID
}

// GetClientCredentials creates a config object from the available client credentials.
// An error is returned if no client credentials are available.
func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) {
	secret := settings.Values[ClientSecret]
	if secret == "" {
		return ClientCredentialsConfig{}, errors.New("missing client secret")
	}
	clientID, tenantID := settings.getClientAndTenant()
	config := NewClientCredentialsConfig(clientID, secret, tenantID)
	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
	config.Resource = settings.Values[Resource]
	return config, nil
}

// GetClientCertificate creates a config object from the available certificate credentials.
// An error is returned if no certificate credentials are available.
func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) {
	certPath := settings.Values[CertificatePath]
	if certPath == "" {
		return ClientCertificateConfig{}, errors.New("missing certificate path")
	}
	certPwd := settings.Values[CertificatePassword]
	clientID, tenantID := settings.getClientAndTenant()
	config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID)
	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
	config.Resource = settings.Values[Resource]
	return config, nil
}

// GetUsernamePassword creates a config object from the available username/password credentials.
// An error is returned if no username/password credentials are available.
func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) {
	username := settings.Values[Username]
	password := settings.Values[Password]
	if username == "" || password == "" {
		return UsernamePasswordConfig{}, errors.New("missing username/password")
	}
	clientID, tenantID := settings.getClientAndTenant()
	config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
	config.Resource = settings.Values[Resource]
	return config, nil
}

// GetMSI creates a MSI config object from the available client ID.
func (settings EnvironmentSettings) GetMSI() MSIConfig {
	config := NewMSIConfig()
	config.Resource = settings.Values[Resource]
	config.ClientID = settings.Values[ClientID]
	return config
}

// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
	clientID, tenantID := settings.getClientAndTenant()
	config := NewDeviceFlowConfig(clientID, tenantID)
	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
	config.Resource = settings.Values[Resource]
	return config
}

// GetAuthorizer creates an Authorizer configured from environment variables in the order:
// 1. Client credentials
// 2. Client certificate
// 3. Username password
// 4. MSI
func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
	//1.Client Credentials
	if settings.clientSecret != "" {
	if c, e := settings.GetClientCredentials(); e == nil {
	config := NewClientCredentialsConfig(settings.clientID, settings.clientSecret, settings.tenantID)
	return c.Authorizer()
	config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint
	config.Resource = settings.resource
	return config.Authorizer()
	}

	//2. Client Certificate
	if settings.certificatePath != "" {
	if c, e := settings.GetClientCertificate(); e == nil {
	config := NewClientCertificateConfig(settings.certificatePath, settings.certificatePassword, settings.clientID, settings.tenantID)
	return c.Authorizer()
	config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint
	config.Resource = settings.resource
	return config.Authorizer()
	}

	//3. Username Password
	if settings.username != "" && settings.password != "" {
	if c, e := settings.GetUsernamePassword(); e == nil {
	config := NewUsernamePasswordConfig(settings.username, settings.password, settings.clientID, settings.tenantID)
	return c.Authorizer()
	config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint
	config.Resource = settings.resource
	return config.Authorizer()
	}

	// 4. MSI
	config := NewMSIConfig()
	return settings.GetMSI().Authorizer()
	config.Resource = settings.resource
	config.ClientID = settings.clientID
	return config.Authorizer()
}
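A minimal sketch of consuming the new EnvironmentSettings API from application code (assuming the package import path github.com/Azure/go-autorest/autorest/azure/auth):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Read the AZURE_* variables once, then pick an authorizer using the same
	// credential precedence as NewAuthorizerFromEnvironment.
	settings, err := auth.GetSettingsFromEnvironment()
	if err != nil {
		fmt.Println("reading environment:", err)
		return
	}
	fmt.Println("subscription:", settings.GetSubscriptionID())

	authorizer, err := settings.GetAuthorizer()
	if err != nil {
		fmt.Println("no usable credentials:", err)
		return
	}
	_ = authorizer // attach to a generated SDK client's Authorizer field
}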
|
||||||
|
|
||||||
// NewAuthorizerFromFile creates an Authorizer configured from a configuration file.
|
// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
|
||||||
|
// 1. Client credentials
|
||||||
|
// 2. Client certificate
|
||||||
func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
|
func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
|
||||||
file, err := getAuthFile()
|
settings, err := GetSettingsFromFile()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil {
|
||||||
resource, err := getResourceForToken(*file, baseURI)
|
return a, err
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
return NewAuthorizerFromFileWithResource(resource)
|
if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil {
|
||||||
|
return a, err
|
||||||
|
}
|
||||||
|
return nil, errors.New("auth file missing client and certificate credentials")
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
// 1. Client credentials
// 2. Client certificate
func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {
    s, err := GetSettingsFromFile()
    if err != nil {
        return nil, err
    }
    if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil {
        return a, err
    }
    if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil {
        return a, err
    }
    return nil, errors.New("auth file missing client and certificate credentials")
}
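A brief usage sketch for the file-based constructors above, assuming AZURE_AUTH_LOCATION points at an SDK auth file (for example one produced by `az ad sp create-for-rbac --sdk-auth`); the surrounding `main` function is illustrative.

```go
package main

import (
    "log"

    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    // Client secret credentials are tried first, then a client certificate.
    authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint)
    if err != nil {
        log.Fatalf("failed to create authorizer from file: %v", err)
    }
    _ = authorizer // typically assigned to an SDK client's Authorizer field
}
```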
// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
func NewAuthorizerFromCLI() (autorest.Authorizer, error) {
    settings, err := GetSettingsFromEnvironment()
    if err != nil {
        return nil, err
    }

    if settings.Values[Resource] == "" {
        settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint
    }

    return NewAuthorizerFromCLIWithResource(settings.Values[Resource])
}
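For local development the CLI path needs nothing beyond a prior `az login`; a short, assumed-typical sketch:

```go
package main

import (
    "log"

    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    // Uses the token cache of the logged-in Azure CLI user; the resource
    // defaults to the environment's ResourceManagerEndpoint.
    authorizer, err := auth.NewAuthorizerFromCLI()
    if err != nil {
        log.Fatalf("failed to create CLI authorizer: %v", err)
    }
    _ = authorizer
}
```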
// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
@@ -197,44 +283,156 @@ func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, err
    return autorest.NewBearerAuthorizer(&adalToken), nil
}

// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file.
func GetSettingsFromFile() (FileSettings, error) {
    s := FileSettings{}
    fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
    if fileLocation == "" {
        return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
    }

    contents, err := ioutil.ReadFile(fileLocation)
    if err != nil {
        return s, err
    }

    // Auth file might be encoded
    decoded, err := decode(contents)
    if err != nil {
        return s, err
    }

    authFile := map[string]interface{}{}
    err = json.Unmarshal(decoded, &authFile)
    if err != nil {
        return s, err
    }

    s.Values = map[string]string{}
    s.setKeyValue(ClientID, authFile["clientId"])
    s.setKeyValue(ClientSecret, authFile["clientSecret"])
    s.setKeyValue(CertificatePath, authFile["clientCertificate"])
    s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"])
    s.setKeyValue(SubscriptionID, authFile["subscriptionId"])
    s.setKeyValue(TenantID, authFile["tenantId"])
    s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"])
    s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"])
    s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"])
    s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"])
    s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"])
    s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"])
    return s, nil
}

// FileSettings contains the available authentication settings.
type FileSettings struct {
    Values map[string]string
}

// GetSubscriptionID returns the available subscription ID or an empty string.
func (settings FileSettings) GetSubscriptionID() string {
    return settings.Values[SubscriptionID]
}

// adds the specified value to the Values map if it isn't nil
func (settings FileSettings) setKeyValue(key string, val interface{}) {
    if val != nil {
        settings.Values[key] = val.(string)
    }
}

// returns the specified AAD endpoint or the public cloud endpoint if unspecified
func (settings FileSettings) getAADEndpoint() string {
    if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok {
        return v
    }
    return azure.PublicCloud.ActiveDirectoryEndpoint
}

// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials.
func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) {
    resource, err := settings.getResourceForToken(baseURI)
    if err != nil {
        return nil, err
    }
    return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
}

// ClientCredentialsAuthorizer creates an authorizer from the available client credentials.
func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {
    resource, err := settings.getResourceForToken(baseURI)
    if err != nil {
        return nil, err
    }
    return settings.ClientCredentialsAuthorizerWithResource(resource)
}

// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken
// from the available client credentials and the specified resource.
func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) {
    if _, ok := settings.Values[ClientSecret]; !ok {
        return nil, errors.New("missing client secret")
    }
    config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID])
    if err != nil {
        return nil, err
    }
    return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource)
}

func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) {
    if _, ok := settings.Values[CertificatePath]; !ok {
        return ClientCertificateConfig{}, errors.New("missing certificate path")
    }
    cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID])
    cfg.AADEndpoint = settings.getAADEndpoint()
    cfg.Resource = resource
    return cfg, nil
}

// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource.
func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
    spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
    if err != nil {
        return nil, err
    }
    return autorest.NewBearerAuthorizer(spToken), nil
}

// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials.
func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) {
    resource, err := settings.getResourceForToken(baseURI)
    if err != nil {
        return nil, err
    }
    return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource)
}

// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials.
func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
    resource, err := settings.getResourceForToken(baseURI)
    if err != nil {
        return nil, err
    }
    return settings.ClientCertificateAuthorizerWithResource(resource)
}

// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials.
func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
    cfg, err := settings.clientCertificateConfigWithResource(resource)
    if err != nil {
        return nil, err
    }
    return cfg.ServicePrincipalToken()
}

// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
    cfg, err := settings.clientCertificateConfigWithResource(resource)
    if err != nil {
        return nil, err
    }
    return cfg.Authorizer()
}

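A minimal sketch of reading the auth file through the new FileSettings type; the explicit resource used here is the public-cloud ARM endpoint, and the `main` wrapper is illustrative.

```go
package main

import (
    "fmt"
    "log"

    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    settings, err := auth.GetSettingsFromFile() // parses the file named by AZURE_AUTH_LOCATION
    if err != nil {
        log.Fatalf("failed to load auth file: %v", err)
    }
    fmt.Println("subscription:", settings.GetSubscriptionID())

    authorizer, err := settings.ClientCredentialsAuthorizerWithResource(azure.PublicCloud.ResourceManagerEndpoint)
    if err != nil {
        log.Fatalf("failed to build authorizer: %v", err)
    }
    _ = authorizer
}
```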
func decode(b []byte) ([]byte, error) {
@@ -259,7 +457,7 @@ func decode(b []byte) ([]byte, error) {
    return ioutil.ReadAll(reader)
}

func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
    // Compare default base URI from the SDK to the endpoints from the public cloud
    // Base URI and token resource are the same string. This func finds the authentication
    // file field that matches the SDK base URI. The SDK defines the public cloud
@@ -269,15 +467,15 @@ func getResourceForToken(f file, baseURI string) (string, error) {
    }
    switch baseURI {
    case azure.PublicCloud.ServiceManagementEndpoint:
        return settings.Values[ManagementEndpoint], nil
    case azure.PublicCloud.ResourceManagerEndpoint:
        return settings.Values[ResourceManagerEndpoint], nil
    case azure.PublicCloud.ActiveDirectoryEndpoint:
        return settings.Values[ActiveDirectoryEndpoint], nil
    case azure.PublicCloud.GalleryEndpoint:
        return settings.Values[GalleryEndpoint], nil
    case azure.PublicCloud.GraphEndpoint:
        return settings.Values[GraphResourceID], nil
    }
    return "", fmt.Errorf("auth: base URI not found in endpoints")
}

@@ -352,18 +550,21 @@ type ClientCredentialsConfig struct {
    Resource string
}

// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
    oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
    if err != nil {
        return nil, err
    }
    return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
}

// Authorizer gets the authorizer from client credentials.
func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
    spToken, err := ccc.ServicePrincipalToken()
    if err != nil {
        return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err)
    }
    return autorest.NewBearerAuthorizer(spToken), nil
}

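The split into ServicePrincipalToken and Authorizer means callers that need the raw adal token no longer have to go through an Authorizer; a sketch, in which the AZURE_* variable names and the explicit Refresh call are illustrative assumptions.

```go
package main

import (
    "log"
    "os"

    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    cfg := auth.NewClientCredentialsConfig(
        os.Getenv("AZURE_CLIENT_ID"),
        os.Getenv("AZURE_CLIENT_SECRET"),
        os.Getenv("AZURE_TENANT_ID"),
    )
    cfg.Resource = azure.PublicCloud.ResourceManagerEndpoint

    // The new ServicePrincipalToken method exposes the token itself.
    spt, err := cfg.ServicePrincipalToken()
    if err != nil {
        log.Fatal(err)
    }
    if err := spt.Refresh(); err != nil {
        log.Fatalf("token refresh failed: %v", err)
    }
    log.Println("access token acquired")
}
```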
@@ -377,26 +578,29 @@ type ClientCertificateConfig struct {
    Resource string
}

// ServicePrincipalToken creates a ServicePrincipalToken from client certificate.
func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
    oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
    if err != nil {
        return nil, err
    }
    certData, err := ioutil.ReadFile(ccc.CertificatePath)
    if err != nil {
        return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
    }
    certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)
    if err != nil {
        return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
    }
    return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
}

// Authorizer gets an authorizer object from client certificate.
func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
    spToken, err := ccc.ServicePrincipalToken()
    if err != nil {
        return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err)
    }
    return autorest.NewBearerAuthorizer(spToken), nil
}

@@ -410,26 +614,30 @@ type DeviceFlowConfig struct {

// Authorizer gets the authorizer from device flow.
func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {
    spToken, err := dfc.ServicePrincipalToken()
    if err != nil {
        return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
    }
    return autorest.NewBearerAuthorizer(spToken), nil
}

// ServicePrincipalToken gets the service principal token from device flow.
func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
    oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)
    if err != nil {
        return nil, err
    }
    oauthClient := &autorest.Client{}
    deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)
    if err != nil {
        return nil, fmt.Errorf("failed to start device auth flow: %s", err)
    }

    log.Println(*deviceCode.Message)

    token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
    if err != nil {
        return nil, fmt.Errorf("failed to finish device auth flow: %s", err)
    }
    return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
}

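A sketch of the device-flow path; the struct-literal construction and the placeholder IDs are assumptions (only the field names used by the methods above are taken from the code).

```go
package main

import (
    "log"

    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    // Field names follow the dfc.* references above; the values are placeholders.
    dfc := auth.DeviceFlowConfig{
        ClientID:    "00000000-0000-0000-0000-000000000000",
        TenantID:    "my-tenant-id",
        AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
        Resource:    azure.PublicCloud.ResourceManagerEndpoint,
    }

    // Authorizer logs the device-code message and blocks until the
    // user completes the browser sign-in.
    authorizer, err := dfc.Authorizer()
    if err != nil {
        log.Fatal(err)
    }
    _ = authorizer
}
```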
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
@@ -456,17 +664,21 @@ type UsernamePasswordConfig struct {
    Resource string
}

// ServicePrincipalToken creates a ServicePrincipalToken from username and password.
func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
    oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)
    if err != nil {
        return nil, err
    }
    return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
}

// Authorizer gets the authorizer from a username and a password.
func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {
    spToken, err := ups.ServicePrincipalToken()
    if err != nil {
        return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err)
    }
    return autorest.NewBearerAuthorizer(spToken), nil
}

@@ -483,10 +695,18 @@ func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
        return nil, err
    }

    var spToken *adal.ServicePrincipalToken
    if mc.ClientID == "" {
        spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
        if err != nil {
            return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
        }
    } else {
        spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID)
        if err != nil {
            return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err)
        }
    }

    return autorest.NewBearerAuthorizer(spToken), nil
}

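A sketch of selecting a user-assigned identity through the new ClientID field; the GUID and the `main` wrapper are placeholders.

```go
package main

import (
    "log"

    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    mc := auth.NewMSIConfig()
    mc.Resource = azure.PublicCloud.ResourceManagerEndpoint

    // Leaving ClientID empty uses the system-assigned identity; setting it
    // routes through NewServicePrincipalTokenFromMSIWithUserAssignedID.
    mc.ClientID = "11111111-1111-1111-1111-111111111111" // placeholder client ID

    authorizer, err := mc.Authorizer()
    if err != nil {
        log.Fatalf("failed to get MSI authorizer: %v", err)
    }
    _ = authorizer
}
```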
@@ -19,6 +19,8 @@ import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/dimchansky/utfbom"
    "github.com/mitchellh/go-homedir"
@@ -47,9 +49,14 @@ type User struct {
    Type string `json:"type"`
}

const azureProfileJSON = "azureProfile.json"

// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI
func ProfilePath() (string, error) {
    if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
        return filepath.Join(cfgDir, azureProfileJSON), nil
    }
    return homedir.Expand("~/.azure/" + azureProfileJSON)
}

// LoadProfile restores a Profile object from a file located at 'path'.
@@ -126,7 +126,7 @@ func GetTokenFromCLI(resource string) (*Token, error) {
    azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles"))

    // Default path for non-Windows.
    const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin"

    // Validate resource, since it gets sent as a command line argument to Azure CLI
    const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed."
@@ -144,13 +144,13 @@ func GetTokenFromCLI(resource string) (*Token, error) {
        cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir")))
        cliCmd.Env = os.Environ()
        cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows))
        cliCmd.Args = append(cliCmd.Args, "/c", "az")
    } else {
        cliCmd = exec.Command("az")
        cliCmd.Env = os.Environ()
        cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath))
    }
    cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource)

    var stderr bytes.Buffer
    cliCmd.Stderr = &stderr

@@ -54,6 +54,7 @@ type Environment struct {
    ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
    ResourceManagerVMDNSSuffix   string `json:"resourceManagerVMDNSSuffix"`
    ContainerRegistryDNSSuffix   string `json:"containerRegistryDNSSuffix"`
    CosmosDBDNSSuffix            string `json:"cosmosDBDNSSuffix"`
    TokenAudience                string `json:"tokenAudience"`
}

@@ -79,6 +80,7 @@ var (
    ServiceManagementVMDNSSuffix: "cloudapp.net",
    ResourceManagerVMDNSSuffix:   "cloudapp.azure.com",
    ContainerRegistryDNSSuffix:   "azurecr.io",
    CosmosDBDNSSuffix:            "documents.azure.com",
    TokenAudience:                "https://management.azure.com/",
}

@@ -102,7 +104,8 @@ var (
    ServiceBusEndpointSuffix:     "servicebus.usgovcloudapi.net",
    ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
    ResourceManagerVMDNSSuffix:   "cloudapp.windowsazure.us",
    ContainerRegistryDNSSuffix:   "azurecr.us",
    CosmosDBDNSSuffix:            "documents.azure.us",
    TokenAudience:                "https://management.usgovcloudapi.net/",
}

@@ -126,7 +129,8 @@ var (
    ServiceBusEndpointSuffix:     "servicebus.chinacloudapi.cn",
    ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
    ResourceManagerVMDNSSuffix:   "cloudapp.azure.cn",
    ContainerRegistryDNSSuffix:   "azurecr.cn",
    CosmosDBDNSSuffix:            "documents.azure.cn",
    TokenAudience:                "https://management.chinacloudapi.cn/",
}

@@ -150,7 +154,8 @@ var (
    ServiceBusEndpointSuffix:     "servicebus.cloudapi.de",
    ServiceManagementVMDNSSuffix: "azurecloudapp.de",
    ResourceManagerVMDNSSuffix:   "cloudapp.microsoftazure.de",
    // ContainerRegistryDNSSuffix: "", ACR not present yet in the German Cloud
    CosmosDBDNSSuffix:            "documents.microsoftazure.de",
    TokenAudience:                "https://management.microsoftazure.de/",
}
)

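The new CosmosDBDNSSuffix keeps endpoint construction cloud-agnostic; a small, assumed-typical sketch (the account name is a placeholder).

```go
package main

import (
    "fmt"

    "github.com/Azure/go-autorest/autorest/azure"
)

func main() {
    account := "myaccount" // placeholder Cosmos DB account name
    for _, env := range []azure.Environment{azure.PublicCloud, azure.USGovernmentCloud, azure.ChinaCloud} {
        // Build the account endpoint from the per-cloud DNS suffix.
        fmt.Printf("%s: https://%s.%s:443/\n", env.Name, account, env.CosmosDBDNSSuffix)
    }
}
```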
@@ -16,6 +16,7 @@ package autorest

import (
    "bytes"
    "crypto/tls"
    "fmt"
    "io"
    "io/ioutil"
@@ -26,7 +27,7 @@ import (
    "time"

    "github.com/Azure/go-autorest/logger"
    "github.com/Azure/go-autorest/tracing"
)

const (
@@ -174,7 +175,7 @@ func NewClientWithUserAgent(ua string) Client {
        PollingDuration: DefaultPollingDuration,
        RetryAttempts:   DefaultRetryAttempts,
        RetryDuration:   DefaultRetryDuration,
        UserAgent:       UserAgent(),
    }
    c.Sender = c.sender()
    c.AddToUserAgent(ua)
@@ -229,9 +230,25 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
// sender returns the Sender to which to send requests.
func (c Client) sender() Sender {
    if c.Sender == nil {
        // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
        var defaultTransport = http.DefaultTransport.(*http.Transport)

        tracing.Transport.Base = &http.Transport{
            Proxy:                 defaultTransport.Proxy,
            DialContext:           defaultTransport.DialContext,
            MaxIdleConns:          defaultTransport.MaxIdleConns,
            IdleConnTimeout:       defaultTransport.IdleConnTimeout,
            TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
            ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
            TLSClientConfig: &tls.Config{
                MinVersion: tls.VersionTLS12,
            },
        }

        j, _ := cookiejar.New(nil)
        return &http.Client{Jar: j, Transport: tracing.Transport}
    }
    return c.Sender
}

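Because any value satisfying the Sender interface can replace the default, callers that need different transport behaviour can still inject their own *http.Client; a sketch in which the user-agent string is a placeholder.

```go
package main

import (
    "crypto/tls"
    "net/http"

    "github.com/Azure/go-autorest/autorest"
)

func main() {
    c := autorest.NewClientWithUserAgent("my-app/1.0")

    // Any *http.Client satisfies autorest.Sender, so a custom transport can
    // replace the default tracing-enabled one while keeping the TLS 1.2 floor.
    c.Sender = &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
        },
    }
    _ = c
}
```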
@@ -21,6 +21,8 @@ import (
    "net/http"
    "strconv"
    "time"

    "github.com/Azure/go-autorest/tracing"
)

// Sender is the interface that wraps the Do method to send HTTP requests.
@@ -38,7 +40,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
    return sf(r)
}

// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender

@@ -68,7 +70,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
//
// Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
    return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
}

// SendWithSender sends the passed http.Request, through the provided Sender, returning the
@@ -216,8 +218,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
    return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
        rr := NewRetriableRequest(r)
        // Increment to add the first call (attempts denotes number of retries)
        for attempt := 0; attempt < attempts+1; {
            err = rr.Prepare()
            if err != nil {
                return resp, err

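A sketch of combining Send with the retry decorator; the target URL and retry parameters are illustrative.

```go
package main

import (
    "log"
    "net/http"
    "time"

    "github.com/Azure/go-autorest/autorest"
)

func main() {
    req, err := http.NewRequest(http.MethodGet, "https://management.azure.com/", nil)
    if err != nil {
        log.Fatal(err)
    }

    // attempts is the number of retries; the first call is added on top.
    resp, err := autorest.Send(req,
        autorest.DoRetryForStatusCodes(2, 2*time.Second, http.StatusTooManyRequests, http.StatusServiceUnavailable),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    log.Println(resp.Status)
}
```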
@@ -157,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) {
}

// String method converts interface v to string. If interface is a list, it
// joins list elements using the separator. Note that only sep[0] will be used for
// joining if any separator is specified.
func String(v interface{}, sep ...string) string {
    if len(sep) == 0 {

@@ -1,7 +1,5 @@
package autorest

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,7 +14,28 @@ import "github.com/Azure/go-autorest/version"
// See the License for the specific language governing permissions and
// limitations under the License.

import (
    "fmt"
    "runtime"
)

const number = "v11.7.1"

var (
    userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
        runtime.Version(),
        runtime.GOARCH,
        runtime.GOOS,
        number,
    )
)

// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
func UserAgent() string {
    return userAgent
}

// Version returns the semantic version (see http://semver.org).
func Version() string {
    return number
}

@@ -162,7 +162,7 @@ type Writer interface {
    // WriteResponse writes the specified HTTP response to the logger if the log level is greater than
    // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
    // Custom filters can be specified to exclude URL, header, and/or body content from the log.
    // By default no response content is excluded.
    WriteResponse(resp *http.Response, filter Filter)
}

@@ -318,7 +318,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
// returns true if the provided body should be included in the log
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
    ct := header.Get("Content-Type")
    return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
}

// creates standard header for log entries, it contains a timestamp and the log level

@@ -0,0 +1,190 @@
package tracing

// Copyright 2018 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
    "context"
    "fmt"
    "net/http"
    "os"

    "contrib.go.opencensus.io/exporter/ocagent"
    "go.opencensus.io/plugin/ochttp"
    "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/trace"
)

var (
    // Transport is the default tracing RoundTripper. The custom options setter will control
    // if traces are being emitted or not.
    Transport = &ochttp.Transport{
        Propagation:     &tracecontext.HTTPFormat{},
        GetStartOptions: getStartOptions,
    }

    // enabled is the flag for marking if tracing is enabled.
    enabled = false

    // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
    // it will be using the parent sampler or the default.
    sampler = trace.NeverSample()

    // Views for metric instrumentation.
    views = map[string]*view.View{}

    // the trace exporter
    traceExporter trace.Exporter
)

func init() {
    enableFromEnv()
}

func enableFromEnv() {
    _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
    _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
    if ok || legacyOk {
        agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")

        if ok {
            EnableWithAIForwarding(agentEndpoint)
        } else {
            Enable()
        }
    }
}

// IsEnabled returns true if monitoring is enabled for the sdk.
func IsEnabled() bool {
    return enabled
}

// Enable will start instrumentation for metrics and traces.
func Enable() error {
    enabled = true
    sampler = nil

    err := initStats()
    return err
}

// Disable will disable instrumentation for metrics and traces.
func Disable() {
    disableStats()
    sampler = trace.NeverSample()
    if traceExporter != nil {
        trace.UnregisterExporter(traceExporter)
    }
    enabled = false
}

// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
// exporter making the metrics and traces available in app insights.
func EnableWithAIForwarding(agentEndpoint string) (err error) {
    err = Enable()
    if err != nil {
        return err
    }

    traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
    if err != nil {
        return err
    }
    trace.RegisterExporter(traceExporter)
    return
}

// getStartOptions is the custom options setter for the ochttp package.
func getStartOptions(*http.Request) trace.StartOptions {
    return trace.StartOptions{
        Sampler: sampler,
    }
}

// initStats registers the views for the http metrics
func initStats() (err error) {
    clientViews := []*view.View{
        ochttp.ClientCompletedCount,
        ochttp.ClientRoundtripLatencyDistribution,
        ochttp.ClientReceivedBytesDistribution,
        ochttp.ClientSentBytesDistribution,
    }
    for _, cv := range clientViews {
        vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
        views[vn] = cv.WithName(vn)
        err = view.Register(views[vn])
        if err != nil {
            return err
        }
    }
    return
}

// disableStats will unregister the previously registered metrics
func disableStats() {
    for _, v := range views {
        view.Unregister(v)
    }
}

// StartSpan starts a trace span
func StartSpan(ctx context.Context, name string) context.Context {
    ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
    return ctx
}

// EndSpan ends a previously started span stored in the context
func EndSpan(ctx context.Context, httpStatusCode int, err error) {
    span := trace.FromContext(ctx)

    if span == nil {
        return
    }

    if err != nil {
        span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
    }
    span.End()
}

// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
func toTraceStatusCode(httpStatusCode int) int32 {
    switch {
    case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
        return trace.StatusCodeOK
    case httpStatusCode == http.StatusBadRequest:
        return trace.StatusCodeInvalidArgument
    case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
        return trace.StatusCodeUnauthenticated
    case httpStatusCode == http.StatusForbidden:
        return trace.StatusCodePermissionDenied
    case httpStatusCode == http.StatusNotFound:
        return trace.StatusCodeNotFound
    case httpStatusCode == http.StatusTooManyRequests:
        return trace.StatusCodeResourceExhausted
    case httpStatusCode == 499:
        return trace.StatusCodeCancelled
    case httpStatusCode == http.StatusNotImplemented:
        return trace.StatusCodeUnimplemented
    case httpStatusCode == http.StatusServiceUnavailable:
        return trace.StatusCodeUnavailable
    case httpStatusCode == http.StatusGatewayTimeout:
        return trace.StatusCodeDeadlineExceeded
    default:
        return trace.StatusCodeUnknown
    }
}
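A sketch of driving the new tracing package directly rather than via the AZURE_SDK_TRACING_ENABLED switch; the span name and target URL are placeholders.

```go
package main

import (
    "context"
    "log"
    "net/http"

    "github.com/Azure/go-autorest/tracing"
)

func main() {
    // Opt in explicitly instead of via the environment variable.
    if err := tracing.Enable(); err != nil {
        log.Fatal(err)
    }
    defer tracing.Disable()

    ctx := tracing.StartSpan(context.Background(), "example.operation")

    req, err := http.NewRequest(http.MethodGet, "https://management.azure.com/", nil)
    if err != nil {
        log.Fatal(err)
    }
    // Requests sent through tracing.Transport are traced and counted.
    resp, err := (&http.Client{Transport: tracing.Transport}).Do(req.WithContext(ctx))

    status := 0
    if resp != nil {
        status = resp.StatusCode
        resp.Body.Close()
    }
    tracing.EndSpan(ctx, status, err)
}
```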
@@ -3,62 +3,3 @@
Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
and histograms.

## Get the code

    $ go get github.com/DataDog/datadog-go/statsd

## Usage

```go
// Create the client
c, err := statsd.New("127.0.0.1:8125")
if err != nil {
    log.Fatal(err)
}
// Prefix every metric with the app name
c.Namespace = "flubber."
// Send the EC2 availability zone as a tag with every metric
c.Tags = append(c.Tags, "us-east-1a")

// Do some metrics!
err = c.Gauge("request.queue_depth", 12, nil, 1)
err = c.Timing("request.duration", duration, nil, 1) // Uses a time.Duration!
err = c.TimeInMilliseconds("request", 12, nil, 1)
err = c.Incr("request.count_total", nil, 1)
err = c.Decr("request.count_total", nil, 1)
err = c.Count("request.count_total", 2, nil, 1)
```

## Buffering Client

DogStatsD accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec.

## Unix Domain Sockets Client

DogStatsD version 6 accepts packets through a Unix Socket datagram connection. You can use this protocol by giving a
`unix:///path/to/dsd.socket` addr argument to the `New` or `NewBufferingClient`.

With this protocol, writes can become blocking if the server's receiving buffer is full. Our default behaviour is to
timeout and drop the packet after 1 ms. You can set a custom timeout duration via the `SetWriteTimeout` method.

The default mode is to pass write errors from the socket to the caller. This includes write errors the library will
automatically recover from (DogStatsD server not ready yet or is restarting). You can drop these errors and emulate
the UDP behaviour by setting the `SkipErrors` property to `true`. Please note that packets will be dropped in both modes.

## Development

Run the tests with:

    $ go test

## Documentation

Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd

## License

go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php).

## Credits

Original code by [ooyala](https://github.com/ooyala/go-dogstatsd).

@@ -0,0 +1,109 @@
package statsd

import "time"

var (
    // DefaultNamespace is the default value for the Namespace option
    DefaultNamespace = ""
    // DefaultTags is the default value for the Tags option
    DefaultTags = []string{}
    // DefaultBuffered is the default value for the Buffered option
    DefaultBuffered = false
    // DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option
    DefaultMaxMessagesPerPayload = 16
    // DefaultAsyncUDS is the default value for the AsyncUDS option
    DefaultAsyncUDS = false
    // DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option
    DefaultWriteTimeoutUDS = 1 * time.Millisecond
)

// Options contains the configuration options for a client.
type Options struct {
    // Namespace to prepend to all metrics, events and service checks name.
    Namespace string
    // Tags are global tags to be applied to every metrics, events and service checks.
    Tags []string
    // Buffered allows to pack multiple DogStatsD messages in one payload. Messages will be buffered
    // until the total size of the payload exceeds MaxMessagesPerPayload metrics, events and/or service
    // checks or after 100ms since the payload started to be built.
    Buffered bool
    // MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
    // Note that this option only takes effect when the client is buffered.
    MaxMessagesPerPayload int
    // AsyncUDS allows to switch between async and blocking mode for UDS.
    // Blocking mode allows for error checking but does not guarantee that calls won't block the execution.
    AsyncUDS bool
    // WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
    WriteTimeoutUDS time.Duration
}

func resolveOptions(options []Option) (*Options, error) {
    o := &Options{
        Namespace:             DefaultNamespace,
        Tags:                  DefaultTags,
        Buffered:              DefaultBuffered,
        MaxMessagesPerPayload: DefaultMaxMessagesPerPayload,
        AsyncUDS:              DefaultAsyncUDS,
        WriteTimeoutUDS:       DefaultWriteTimeoutUDS,
    }

    for _, option := range options {
        err := option(o)
        if err != nil {
            return nil, err
        }
    }

    return o, nil
}

// Option is a client option. Can return an error if validation fails.
type Option func(*Options) error

// WithNamespace sets the Namespace option.
func WithNamespace(namespace string) Option {
    return func(o *Options) error {
        o.Namespace = namespace
        return nil
    }
}

// WithTags sets the Tags option.
func WithTags(tags []string) Option {
    return func(o *Options) error {
        o.Tags = tags
        return nil
    }
}

// Buffered sets the Buffered option.
func Buffered() Option {
    return func(o *Options) error {
        o.Buffered = true
        return nil
    }
}

// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option.
func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
    return func(o *Options) error {
        o.MaxMessagesPerPayload = maxMessagesPerPayload
        return nil
    }
}

// WithAsyncUDS sets the AsyncUDS option.
func WithAsyncUDS() Option {
    return func(o *Options) error {
        o.AsyncUDS = true
        return nil
    }
}

// WithWriteTimeoutUDS sets the WriteTimeoutUDS option.
func WithWriteTimeoutUDS(writeTimeoutUDS time.Duration) Option {
    return func(o *Options) error {
        o.WriteTimeoutUDS = writeTimeoutUDS
        return nil
    }
}
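A sketch of the new functional-options constructor, roughly what NewBuffered now does internally plus a namespace and global tags; the address and metric names are placeholders.

```go
package main

import (
    "log"
    "time"

    "github.com/DataDog/datadog-go/statsd"
)

func main() {
    c, err := statsd.New("127.0.0.1:8125",
        statsd.Buffered(),
        statsd.WithMaxMessagesPerPayload(32),
        statsd.WithNamespace("flubber."),
        statsd.WithTags([]string{"region:us-east-1a"}),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    if err := c.Timing("request.duration", 250*time.Millisecond, nil, 1); err != nil {
        log.Println(err)
    }
}
```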
@ -29,6 +29,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -60,6 +61,12 @@ traffic instead of UDP.
|
||||||
*/
|
*/
|
||||||
const UnixAddressPrefix = "unix://"
|
const UnixAddressPrefix = "unix://"
|
||||||
|
|
||||||
|
// Client-side entity ID injection for container tagging
|
||||||
|
const (
|
||||||
|
entityIDEnvName = "DD_ENTITY_ID"
|
||||||
|
entityIDTagName = "dd.internal.entity_id"
|
||||||
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Stat suffixes
|
Stat suffixes
|
||||||
*/
|
*/
|
||||||
|
@@ -104,41 +111,72 @@ type Client struct {
 
 // New returns a pointer to a new Client given an addr in the format "hostname:port" or
 // "unix:///path/to/socket".
-func New(addr string) (*Client, error) {
-	if strings.HasPrefix(addr, UnixAddressPrefix) {
-		w, err := newUdsWriter(addr[len(UnixAddressPrefix)-1:])
-		if err != nil {
-			return nil, err
-		}
-		return NewWithWriter(w)
-	}
-	w, err := newUDPWriter(addr)
+func New(addr string, options ...Option) (*Client, error) {
+	o, err := resolveOptions(options)
 	if err != nil {
 		return nil, err
 	}
-	return NewWithWriter(w)
+
+	var w statsdWriter
+
+	if !strings.HasPrefix(addr, UnixAddressPrefix) {
+		w, err = newUDPWriter(addr)
+	} else if o.AsyncUDS {
+		w, err = newAsyncUdsWriter(addr[len(UnixAddressPrefix)-1:])
+	} else {
+		w, err = newBlockingUdsWriter(addr[len(UnixAddressPrefix)-1:])
+	}
+	if err != nil {
+		return nil, err
+	}
+	w.SetWriteTimeout(o.WriteTimeoutUDS)
+
+	c := Client{
+		Namespace: o.Namespace,
+		Tags:      o.Tags,
+		writer:    w,
+	}
+
+	// Inject DD_ENTITY_ID as a constant tag if found
+	entityID := os.Getenv(entityIDEnvName)
+	if entityID != "" {
+		entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
+		c.Tags = append(c.Tags, entityTag)
+	}
+
+	if o.Buffered {
+		c.bufferLength = o.MaxMessagesPerPayload
+		c.commands = make([][]byte, 0, o.MaxMessagesPerPayload)
+		c.flushTime = time.Millisecond * 100
+		c.stop = make(chan struct{}, 1)
+		go c.watch()
+	}
+
+	return &c, nil
 }
 
 // NewWithWriter creates a new Client with given writer. Writer is a
 // io.WriteCloser + SetWriteTimeout(time.Duration) error
 func NewWithWriter(w statsdWriter) (*Client, error) {
 	client := &Client{writer: w, SkipErrors: false}
+
+	// Inject DD_ENTITY_ID as a constant tag if found
+	entityID := os.Getenv(entityIDEnvName)
+	if entityID != "" {
+		entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
+		client.Tags = append(client.Tags, entityTag)
+	}
+
 	return client, nil
 }
 
 // NewBuffered returns a Client that buffers its output and sends it in chunks.
 // Buflen is the length of the buffer in number of commands.
+//
+// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST
+// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address.
 func NewBuffered(addr string, buflen int) (*Client, error) {
-	client, err := New(addr)
-	if err != nil {
-		return nil, err
-	}
-	client.bufferLength = buflen
-	client.commands = make([][]byte, 0, buflen)
-	client.flushTime = time.Millisecond * 100
-	client.stop = make(chan struct{}, 1)
-	go client.watch()
-	return client, nil
+	return New(addr, Buffered(), WithMaxMessagesPerPayload(buflen))
 }
 
 // format a message from its name, value, tags and rate. Also adds global
@@ -182,7 +220,7 @@ func (c *Client) format(name string, value interface{}, suffix []byte, tags []st
 // SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP.
 func (c *Client) SetWriteTimeout(d time.Duration) error {
 	if c == nil {
-		return nil
+		return fmt.Errorf("Client is nil")
 	}
 	return c.writer.SetWriteTimeout(d)
 }
@@ -269,7 +307,7 @@ func copyAndResetBuffer(buf *bytes.Buffer) []byte {
 // Flush forces a flush of the pending commands in the buffer
 func (c *Client) Flush() error {
 	if c == nil {
-		return nil
+		return fmt.Errorf("Client is nil")
 	}
 	c.Lock()
 	defer c.Unlock()
@@ -323,7 +361,7 @@ func (c *Client) sendMsg(msg []byte) error {
 // send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags.
 func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error {
 	if c == nil {
-		return nil
+		return fmt.Errorf("Client is nil")
 	}
 	if rate < 1 && rand.Float64() > rate {
 		return nil
@@ -381,7 +419,7 @@ func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, r
 // Event sends the provided Event.
 func (c *Client) Event(e *Event) error {
 	if c == nil {
-		return nil
+		return fmt.Errorf("Client is nil")
 	}
 	stat, err := e.Encode(c.Tags...)
 	if err != nil {
@@ -399,7 +437,7 @@ func (c *Client) SimpleEvent(title, text string) error {
 // ServiceCheck sends the provided ServiceCheck.
 func (c *Client) ServiceCheck(sc *ServiceCheck) error {
 	if c == nil {
-		return nil
+		return fmt.Errorf("Client is nil")
 	}
 	stat, err := sc.Encode(c.Tags...)
 	if err != nil {
@@ -417,7 +455,7 @@ func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) erro
 // Close the client connection.
 func (c *Client) Close() error {
 	if c == nil {
-		return nil
+		return fmt.Errorf("Client is nil")
 	}
 	select {
 	case c.stop <- struct{}{}:
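The hunks above change the nil-receiver guard in SetWriteTimeout, Flush, send, Event, ServiceCheck and Close from a silent return nil to an explicit error. A minimal sketch of the observable difference, assuming the usual statsd and fmt imports:

	var c *statsd.Client // a nil client, e.g. construction was skipped or failed
	err := c.Flush()
	fmt.Println(err) // now reports "Client is nil" instead of returning nil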
@@ -2,10 +2,18 @@ package statsd
 
 import (
 	"errors"
+	"fmt"
 	"net"
+	"os"
 	"time"
 )
 
+const (
+	autoHostEnvName = "DD_AGENT_HOST"
+	autoPortEnvName = "DD_DOGSTATSD_PORT"
+	defaultUDPPort  = "8125"
+)
+
 // udpWriter is an internal class wrapping around management of UDP connection
 type udpWriter struct {
 	conn net.Conn
@@ -13,6 +21,13 @@ type udpWriter struct {
 
 // New returns a pointer to a new udpWriter given an addr in the format "hostname:port".
 func newUDPWriter(addr string) (*udpWriter, error) {
+	if addr == "" {
+		addr = addressFromEnvironment()
+	}
+	if addr == "" {
+		return nil, errors.New("No address passed and autodetection from environment failed")
+	}
+
 	udpAddr, err := net.ResolveUDPAddr("udp", addr)
 	if err != nil {
 		return nil, err
@@ -38,3 +53,21 @@ func (w *udpWriter) Write(data []byte) (int, error) {
 func (w *udpWriter) Close() error {
 	return w.conn.Close()
 }
+
+func (w *udpWriter) remoteAddr() net.Addr {
+	return w.conn.RemoteAddr()
+}
+
+func addressFromEnvironment() string {
+	autoHost := os.Getenv(autoHostEnvName)
+	if autoHost == "" {
+		return ""
+	}
+
+	autoPort := os.Getenv(autoPortEnvName)
+	if autoPort == "" {
+		autoPort = defaultUDPPort
+	}
+
+	return fmt.Sprintf("%s:%s", autoHost, autoPort)
+}
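The empty-address fallback added above is driven entirely by environment variables. A small sketch, assuming the usual os and statsd imports; the agent IP is a placeholder:

	os.Setenv("DD_AGENT_HOST", "169.254.1.1") // placeholder agent address
	// DD_DOGSTATSD_PORT is unset, so the default port 8125 applies and the
	// client ends up targeting "169.254.1.1:8125".
	client, err := statsd.New("")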
@@ -1,8 +1,6 @@
 package statsd
 
 import (
-	"net"
-	"sync"
 	"time"
 )
 
@@ -11,61 +9,3 @@ UDSTimeout holds the default timeout for UDS socket writes, as they can get
 blocking when the receiving buffer is full.
 */
 const defaultUDSTimeout = 1 * time.Millisecond
-
-// udsWriter is an internal class wrapping around management of UDS connection
-type udsWriter struct {
-	// Address to send metrics to, needed to allow reconnection on error
-	addr net.Addr
-	// Established connection object, or nil if not connected yet
-	conn net.Conn
-	// write timeout
-	writeTimeout time.Duration
-	sync.Mutex // used to lock conn / writer can replace it
-}
-
-// New returns a pointer to a new udsWriter given a socket file path as addr.
-func newUdsWriter(addr string) (*udsWriter, error) {
-	udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
-	if err != nil {
-		return nil, err
-	}
-	// Defer connection to first Write
-	writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
-	return writer, nil
-}
-
-// SetWriteTimeout allows the user to set a custom write timeout
-func (w *udsWriter) SetWriteTimeout(d time.Duration) error {
-	w.writeTimeout = d
-	return nil
-}
-
-// Write data to the UDS connection with write timeout and minimal error handling:
-// create the connection if nil, and destroy it if the statsd server has disconnected
-func (w *udsWriter) Write(data []byte) (int, error) {
-	w.Lock()
-	defer w.Unlock()
-	// Try connecting (first packet or connection lost)
-	if w.conn == nil {
-		conn, err := net.Dial(w.addr.Network(), w.addr.String())
-		if err != nil {
-			return 0, err
-		}
-		w.conn = conn
-	}
-	w.conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
-	n, e := w.conn.Write(data)
-	if e != nil {
-		// Statsd server disconnected, retry connecting at next packet
-		w.conn = nil
-		return 0, e
-	}
-	return n, e
-}
-
-func (w *udsWriter) Close() error {
-	if w.conn != nil {
-		return w.conn.Close()
-	}
-	return nil
-}
@@ -0,0 +1,113 @@
package statsd

import (
	"fmt"
	"net"
	"time"
)

// asyncUdsWriter is an internal class wrapping around management of UDS connection
type asyncUdsWriter struct {
	// Address to send metrics to, needed to allow reconnection on error
	addr net.Addr
	// Established connection object, or nil if not connected yet
	conn net.Conn
	// write timeout
	writeTimeout time.Duration
	// datagramQueue is the queue of datagrams ready to be sent
	datagramQueue chan []byte
	stopChan      chan struct{}
}

// New returns a pointer to a new asyncUdsWriter given a socket file path as addr.
func newAsyncUdsWriter(addr string) (*asyncUdsWriter, error) {
	udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
	if err != nil {
		return nil, err
	}

	writer := &asyncUdsWriter{
		addr:         udsAddr,
		conn:         nil,
		writeTimeout: defaultUDSTimeout,
		// 8192 * 8KB = 65.5MB
		datagramQueue: make(chan []byte, 8192),
		stopChan:      make(chan struct{}, 1),
	}

	go writer.sendLoop()
	return writer, nil
}

func (w *asyncUdsWriter) sendLoop() {
	for {
		select {
		case datagram := <-w.datagramQueue:
			w.write(datagram)
		case <-w.stopChan:
			return
		}
	}
}

// SetWriteTimeout allows the user to set a custom write timeout
func (w *asyncUdsWriter) SetWriteTimeout(d time.Duration) error {
	w.writeTimeout = d
	return nil
}

// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *asyncUdsWriter) Write(data []byte) (int, error) {
	select {
	case w.datagramQueue <- data:
		return len(data), nil
	default:
		return 0, fmt.Errorf("uds datagram queue is full (the agent might not be able to keep up)")
	}
}

// write writes the given data to the UDS.
// This function is **not** thread safe.
func (w *asyncUdsWriter) write(data []byte) (int, error) {
	conn, err := w.ensureConnection()
	if err != nil {
		return 0, err
	}

	conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
	n, err := conn.Write(data)

	if e, isNetworkErr := err.(net.Error); !isNetworkErr || !e.Temporary() {
		// err is not temporary, Statsd server disconnected, retry connecting at next packet
		w.unsetConnection()
		return 0, e
	}

	return n, err
}

func (w *asyncUdsWriter) Close() error {
	close(w.stopChan)
	if w.conn != nil {
		return w.conn.Close()
	}
	return nil
}

func (w *asyncUdsWriter) ensureConnection() (net.Conn, error) {
	if w.conn != nil {
		return w.conn, nil
	}

	newConn, err := net.Dial(w.addr.Network(), w.addr.String())
	if err != nil {
		return nil, err
	}
	w.conn = newConn
	return newConn, nil
}

func (w *asyncUdsWriter) unsetConnection() {
	w.conn = nil
}
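Callers opt into this queue-based writer through the WithAsyncUDS option from the options hunk earlier in the diff; Write then enqueues datagrams and drops them with an error when the queue is full instead of blocking. A hedged sketch, assuming the usual statsd import; the socket path is an assumption, not something this diff defines:

	client, err := statsd.New("unix:///var/run/datadog/dsd.socket",
		statsd.WithAsyncUDS(), // enqueue datagrams; the caller never blocks on the socket
	)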
@@ -0,0 +1,92 @@
package statsd

import (
	"net"
	"sync"
	"time"
)

// blockingUdsWriter is an internal class wrapping around management of UDS connection
type blockingUdsWriter struct {
	// Address to send metrics to, needed to allow reconnection on error
	addr net.Addr
	// Established connection object, or nil if not connected yet
	conn net.Conn
	// write timeout
	writeTimeout time.Duration
	sync.RWMutex // used to lock conn / writer can replace it
}

// New returns a pointer to a new blockingUdsWriter given a socket file path as addr.
func newBlockingUdsWriter(addr string) (*blockingUdsWriter, error) {
	udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
	if err != nil {
		return nil, err
	}
	// Defer connection to first Write
	writer := &blockingUdsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
	return writer, nil
}

// SetWriteTimeout allows the user to set a custom write timeout
func (w *blockingUdsWriter) SetWriteTimeout(d time.Duration) error {
	w.writeTimeout = d
	return nil
}

// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *blockingUdsWriter) Write(data []byte) (int, error) {
	conn, err := w.ensureConnection()
	if err != nil {
		return 0, err
	}

	conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
	n, e := conn.Write(data)

	if err, isNetworkErr := e.(net.Error); !isNetworkErr || !err.Temporary() {
		// Statsd server disconnected, retry connecting at next packet
		w.unsetConnection()
		return 0, e
	}
	return n, e
}

func (w *blockingUdsWriter) Close() error {
	if w.conn != nil {
		return w.conn.Close()
	}
	return nil
}

func (w *blockingUdsWriter) ensureConnection() (net.Conn, error) {
	// Check if we've already got a socket we can use
	w.RLock()
	currentConn := w.conn
	w.RUnlock()

	if currentConn != nil {
		return currentConn, nil
	}

	// Looks like we might need to connect - try again with write locking.
	w.Lock()
	defer w.Unlock()
	if w.conn != nil {
		return w.conn, nil
	}

	newConn, err := net.Dial(w.addr.Network(), w.addr.String())
	if err != nil {
		return nil, err
	}
	w.conn = newConn
	return newConn, nil
}

func (w *blockingUdsWriter) unsetConnection() {
	w.Lock()
	defer w.Unlock()
	w.conn = nil
}
@@ -309,40 +309,34 @@ func (g *Container) DeleteP(path string) error {
 	return g.Delete(strings.Split(path, ".")...)
 }
 
-// Merge - Merges two gabs-containers
-func (g *Container) Merge(toMerge *Container) error {
+// MergeFn merges two objects using a provided function to resolve collisions.
+//
+// The collision function receives two interface{} arguments, destination (the
+// original object) and source (the object being merged into the destination).
+// Whichever value is returned becomes the new value in the destination object
+// at the location of the collision.
+func (g *Container) MergeFn(source *Container, collisionFn func(destination, source interface{}) interface{}) error {
 	var recursiveFnc func(map[string]interface{}, []string) error
 	recursiveFnc = func(mmap map[string]interface{}, path []string) error {
 		for key, value := range mmap {
 			newPath := append(path, key)
 			if g.Exists(newPath...) {
-				target := g.Search(newPath...)
+				existingData := g.Search(newPath...).Data()
 				switch t := value.(type) {
 				case map[string]interface{}:
-					switch targetV := target.Data().(type) {
+					switch existingVal := existingData.(type) {
 					case map[string]interface{}:
 						if err := recursiveFnc(t, newPath); err != nil {
 							return err
 						}
-					case []interface{}:
-						g.Set(append(targetV, t), newPath...)
 					default:
-						newSlice := append([]interface{}{}, targetV)
-						g.Set(append(newSlice, t), newPath...)
-					}
-				case []interface{}:
-					for _, valueOfSlice := range t {
-						if err := g.ArrayAppend(valueOfSlice, newPath...); err != nil {
+						if _, err := g.Set(collisionFn(existingVal, t), newPath...); err != nil {
 							return err
 						}
 					}
 				default:
-					switch targetV := target.Data().(type) {
-					case []interface{}:
-						g.Set(append(targetV, t), newPath...)
-					default:
-						newSlice := append([]interface{}{}, targetV)
-						g.Set(append(newSlice, t), newPath...)
+					if _, err := g.Set(collisionFn(existingData, t), newPath...); err != nil {
+						return err
 					}
 				}
 			} else {
@@ -354,12 +348,37 @@ func (g *Container) Merge(toMerge *Container) error {
 		}
 		return nil
 	}
-	if mmap, ok := toMerge.Data().(map[string]interface{}); ok {
+	if mmap, ok := source.Data().(map[string]interface{}); ok {
 		return recursiveFnc(mmap, []string{})
 	}
 	return nil
 }
+
+// Merge a source object into an existing destination object. When a collision
+// is found within the merged structures (both a source and destination object
+// contain the same non-object keys) the result will be an array containing both
+// values, where values that are already arrays will be expanded into the
+// resulting array.
+//
+// It is possible to merge structures with different collision behaviours with
+// MergeFn.
+func (g *Container) Merge(source *Container) error {
+	return g.MergeFn(source, func(dest, source interface{}) interface{} {
+		destArr, destIsArray := dest.([]interface{})
+		sourceArr, sourceIsArray := source.([]interface{})
+		if destIsArray {
+			if sourceIsArray {
+				return append(destArr, sourceArr...)
+			}
+			return append(destArr, source)
+		}
+		if sourceIsArray {
+			return append(append([]interface{}{}, dest), sourceArr...)
+		}
+		return []interface{}{dest, source}
+	})
+}
 
 //--------------------------------------------------------------------------------------------------
 
 /*
@@ -377,7 +396,9 @@ func (g *Container) ArrayAppend(value interface{}, path ...string) error {
 	}
 
 	newArray := []interface{}{}
-	newArray = append(newArray, g.Search(path...).Data())
+	if d := g.Search(path...).Data(); d != nil {
+		newArray = append(newArray, d)
+	}
 	newArray = append(newArray, value)
 
 	_, err := g.Set(newArray, path...)
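Since MergeFn is new API in this vendored copy of gabs, here is a minimal sketch of how the collision hook is intended to be used; the import path, JSON inputs and the keep-the-destination policy are assumptions for illustration only:

	package main

	import (
		"fmt"
		"log"

		"github.com/Jeffail/gabs"
	)

	func main() {
		// Errors from ParseJSON are elided for brevity; inputs are made up.
		dest, _ := gabs.ParseJSON([]byte(`{"a":1,"b":{"c":2}}`))
		src, _ := gabs.ParseJSON([]byte(`{"a":10,"b":{"d":3}}`))

		// Resolve collisions by keeping the destination's value.
		err := dest.MergeFn(src, func(destination, source interface{}) interface{} {
			return destination
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(dest.String()) // expected to print {"a":1,"b":{"c":2,"d":3}}
	}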
@@ -3,10 +3,13 @@
 package winio
 
 import (
+	"context"
 	"errors"
+	"fmt"
 	"io"
 	"net"
 	"os"
+	"runtime"
 	"syscall"
 	"time"
 	"unsafe"
@@ -18,6 +21,48 @@ import (
 //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
 //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
 //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
+//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
+//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
+//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
+
+type ioStatusBlock struct {
+	Status, Information uintptr
+}
+
+type objectAttributes struct {
+	Length             uintptr
+	RootDirectory      uintptr
+	ObjectName         *unicodeString
+	Attributes         uintptr
+	SecurityDescriptor *securityDescriptor
+	SecurityQoS        uintptr
+}
+
+type unicodeString struct {
+	Length        uint16
+	MaximumLength uint16
+	Buffer        uintptr
+}
+
+type securityDescriptor struct {
+	Revision byte
+	Sbz1     byte
+	Control  uint16
+	Owner    uintptr
+	Group    uintptr
+	Sacl     uintptr
+	Dacl     uintptr
+}
+
+type ntstatus int32
+
+func (status ntstatus) Err() error {
+	if status >= 0 {
+		return nil
+	}
+	return rtlNtStatusToDosError(status)
+}
+
 const (
 	cERROR_PIPE_BUSY = syscall.Errno(231)
@@ -25,21 +70,20 @@ const (
 	cERROR_PIPE_CONNECTED = syscall.Errno(535)
 	cERROR_SEM_TIMEOUT    = syscall.Errno(121)
 
-	cPIPE_ACCESS_DUPLEX            = 0x3
-	cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
 	cSECURITY_SQOS_PRESENT = 0x100000
 	cSECURITY_ANONYMOUS    = 0
 
-	cPIPE_REJECT_REMOTE_CLIENTS = 0x8
-
-	cPIPE_UNLIMITED_INSTANCES = 255
-
-	cNMPWAIT_USE_DEFAULT_WAIT = 0
-	cNMPWAIT_NOWAIT           = 1
-
 	cPIPE_TYPE_MESSAGE = 4
 
 	cPIPE_READMODE_MESSAGE = 2
 
+	cFILE_OPEN   = 1
+	cFILE_CREATE = 2
+
+	cFILE_PIPE_MESSAGE_TYPE          = 1
+	cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
+
+	cSE_DACL_PRESENT = 4
 )
 
 var (
@@ -137,9 +181,30 @@ func (s pipeAddress) String() string {
 	return string(s)
 }
 
+// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
+func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
+	for {
+		select {
+		case <-ctx.Done():
+			return syscall.Handle(0), ctx.Err()
+		default:
+			h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+			if err == nil {
+				return h, nil
+			}
+			if err != cERROR_PIPE_BUSY {
+				return h, &os.PathError{Err: err, Op: "open", Path: *path}
+			}
+			// Wait 10 msec and try again. This is a rather simplistic
+			// view, as we always try each 10 milliseconds.
+			time.Sleep(time.Millisecond * 10)
+		}
+	}
+}
+
 // DialPipe connects to a named pipe by path, timing out if the connection
 // takes longer than the specified duration. If timeout is nil, then we use
-// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
+// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
 func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
 	var absTimeout time.Time
 	if timeout != nil {
@@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
 	} else {
 		absTimeout = time.Now().Add(time.Second * 2)
 	}
-	var err error
-	var h syscall.Handle
-	for {
-		h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
-		if err != cERROR_PIPE_BUSY {
-			break
-		}
-		if time.Now().After(absTimeout) {
-			return nil, ErrTimeout
-		}
-		// Wait 10 msec and try again. This is a rather simplistic
-		// view, as we always try each 10 milliseconds.
-		time.Sleep(time.Millisecond * 10)
+	ctx, _ := context.WithDeadline(context.Background(), absTimeout)
+	conn, err := DialPipeContext(ctx, path)
+	if err == context.DeadlineExceeded {
+		return nil, ErrTimeout
 	}
+	return conn, err
+}
+
+// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
+// cancellation or timeout.
+func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
+	var err error
+	var h syscall.Handle
+	h, err = tryDialPipe(ctx, &path)
 	if err != nil {
-		return nil, &os.PathError{Op: "open", Path: path, Err: err}
+		return nil, err
 	}
 
 	var flags uint32
@@ -196,41 +260,85 @@ type acceptResponse struct {
 type win32PipeListener struct {
 	firstHandle syscall.Handle
 	path        string
-	securityDescriptor []byte
 	config      PipeConfig
 	acceptCh    chan (chan acceptResponse)
 	closeCh     chan int
 	doneCh      chan int
 }
 
-func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
-	var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
-	if first {
-		flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
-	}
-
-	var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
-	if c.MessageMode {
-		mode |= cPIPE_TYPE_MESSAGE
-	}
-
-	sa := &syscall.SecurityAttributes{}
-	sa.Length = uint32(unsafe.Sizeof(*sa))
-	if securityDescriptor != nil {
-		len := uint32(len(securityDescriptor))
-		sa.SecurityDescriptor = localAlloc(0, len)
-		defer localFree(sa.SecurityDescriptor)
-		copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
-	}
-	h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
+func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
+	path16, err := syscall.UTF16FromString(path)
 	if err != nil {
 		return 0, &os.PathError{Op: "open", Path: path, Err: err}
 	}
+
+	var oa objectAttributes
+	oa.Length = unsafe.Sizeof(oa)
+
+	var ntPath unicodeString
+	if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+	defer localFree(ntPath.Buffer)
+	oa.ObjectName = &ntPath
+
+	// The security descriptor is only needed for the first pipe.
+	if first {
+		if sd != nil {
+			len := uint32(len(sd))
+			sdb := localAlloc(0, len)
+			defer localFree(sdb)
+			copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
+			oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
+		} else {
+			// Construct the default named pipe security descriptor.
+			var dacl uintptr
+			if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
+				return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
+			}
+			defer localFree(dacl)
+
+			sdb := &securityDescriptor{
+				Revision: 1,
+				Control:  cSE_DACL_PRESENT,
+				Dacl:     dacl,
+			}
+			oa.SecurityDescriptor = sdb
+		}
+	}
+
+	typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
+	if c.MessageMode {
+		typ |= cFILE_PIPE_MESSAGE_TYPE
+	}
+
+	disposition := uint32(cFILE_OPEN)
+	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
+	if first {
+		disposition = cFILE_CREATE
+		// By not asking for read or write access, the named pipe file system
+		// will put this pipe into an initially disconnected state, blocking
+		// client connections until the next call with first == false.
+		access = syscall.SYNCHRONIZE
+	}
+
+	timeout := int64(-50 * 10000) // 50ms
+
+	var (
+		h    syscall.Handle
+		iosb ioStatusBlock
+	)
+	err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
+	if err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+
+	runtime.KeepAlive(ntPath)
 	return h, nil
 }
 
 func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
-	h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
+	h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
 	if err != nil {
 		return nil, err
 	}
@@ -341,28 +449,9 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
 	if err != nil {
 		return nil, err
 	}
-	// Create a client handle and connect it. This results in the pipe
-	// instance always existing, so that clients see ERROR_PIPE_BUSY
-	// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
-	// up so that no other instances can be used. This would have been
-	// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
-	// instead of CreateNamedPipe. (Apparently created named pipes are
-	// considered to be in listening state regardless of whether any
-	// active calls to ConnectNamedPipe are outstanding.)
-	h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
-	if err != nil {
-		syscall.Close(h)
-		return nil, err
-	}
-	// Close the client handle. The server side of the instance will
-	// still be busy, leading to ERROR_PIPE_BUSY instead of
-	// ERROR_NOT_FOUND, as long as we don't close the server handle,
-	// or disconnect the client with DisconnectNamedPipe.
-	syscall.Close(h2)
 	l := &win32PipeListener{
 		firstHandle: h,
 		path:        path,
-		securityDescriptor: sd,
 		config:      *c,
 		acceptCh:    make(chan (chan acceptResponse)),
 		closeCh:     make(chan int),
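For context, a minimal client-side sketch of the new DialPipeContext entry point added above; the pipe name, timeout and payload are illustrative assumptions, and the program only builds on Windows since go-winio wraps Win32 named pipes:

	package main

	import (
		"context"
		"log"
		"time"

		"github.com/Microsoft/go-winio"
	)

	func main() {
		// Give up dialing after five seconds (or when ctx is cancelled).
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		conn, err := winio.DialPipeContext(ctx, `\\.\pipe\my-service`) // placeholder pipe name
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		if _, err := conn.Write([]byte("ping")); err != nil {
			log.Fatal(err)
		}
	}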
@@ -1,4 +1,4 @@
-// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+// Code generated by 'go generate'; DO NOT EDIT.
 
 package winio
 
@@ -38,6 +38,7 @@ func errnoErr(e syscall.Errno) error {
 
 var (
 	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+	modntdll    = windows.NewLazySystemDLL("ntdll.dll")
 	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
 
 	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
@@ -47,10 +48,13 @@ var (
 	procConnectNamedPipe         = modkernel32.NewProc("ConnectNamedPipe")
 	procCreateNamedPipeW         = modkernel32.NewProc("CreateNamedPipeW")
 	procCreateFileW              = modkernel32.NewProc("CreateFileW")
-	procWaitNamedPipeW           = modkernel32.NewProc("WaitNamedPipeW")
 	procGetNamedPipeInfo         = modkernel32.NewProc("GetNamedPipeInfo")
 	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
 	procLocalAlloc               = modkernel32.NewProc("LocalAlloc")
+	procNtCreateNamedPipeFile        = modntdll.NewProc("NtCreateNamedPipeFile")
+	procRtlNtStatusToDosErrorNoTeb   = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+	procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+	procRtlDefaultNpAcl              = modntdll.NewProc("RtlDefaultNpAcl")
 	procLookupAccountNameW     = modadvapi32.NewProc("LookupAccountNameW")
 	procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
 	procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@@ -176,27 +180,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA
 	return
 }
 
-func waitNamedPipe(name string, timeout uint32) (err error) {
-	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(name)
-	if err != nil {
-		return
-	}
-	return _waitNamedPipe(_p0, timeout)
-}
-
-func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
 func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
 	r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
 	if r1 == 0 {
@@ -227,6 +210,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
 	return
 }
 
+func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
+	r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+	status = ntstatus(r0)
+	return
+}
+
+func rtlNtStatusToDosError(status ntstatus) (winerr error) {
+	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
+	if r0 != 0 {
+		winerr = syscall.Errno(r0)
+	}
+	return
+}
+
+func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
+	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
+	status = ntstatus(r0)
+	return
+}
+
+func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
+	r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
+	status = ntstatus(r0)
+	return
+}
+
 func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(accountName)
@@ -52,5 +52,5 @@ The docs can be found at [godoc.org][docs], as usual.
 
 
-[docs]:    https://godoc.org/github.com/nytimes/gziphandler
-[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md
+[docs]:    https://godoc.org/github.com/NYTimes/gziphandler
+[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
@@ -0,0 +1,5 @@
module github.com/NYTimes/gziphandler

go 1.11

require github.com/stretchr/testify v1.3.0
@@ -0,0 +1,7 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -1,4 +1,4 @@
-package gziphandler
+package gziphandler // import "github.com/NYTimes/gziphandler"
 
 import (
 	"bufio"
@@ -28,6 +28,12 @@ import (
 	"sync"
 )
 
+/*
+SessionVariables maps session variables to their values.
+All defined session variables will be set once after a database connection is opened.
+*/
+type SessionVariables map[string]string
+
 /*
 A Connector represents a hdb driver in a fixed configuration.
 A Connector can be passed to sql.OpenDB (starting from go 1.10) allowing users to bypass a string based data source name.
@@ -38,6 +44,7 @@ type Connector struct {
 	locale                         string
 	bufferSize, fetchSize, timeout int
 	tlsConfig                      *tls.Config
+	sessionVariables               SessionVariables
 }
 
 func newConnector() *Connector {
@@ -256,6 +263,21 @@ func (c *Connector) SetTLSConfig(tlsConfig *tls.Config) error {
 	return nil
 }
 
+// SessionVariables returns the session variables stored in connector.
+func (c *Connector) SessionVariables() SessionVariables {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.sessionVariables
+}
+
+// SetSessionVariables sets the session variables of the connector.
+func (c *Connector) SetSessionVariables(sessionVariables SessionVariables) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.sessionVariables = sessionVariables
+	return nil
+}
+
 // BasicAuthDSN return the connector DSN for basic authentication.
 func (c *Connector) BasicAuthDSN() string {
 	values := url.Values{}
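A hedged sketch of how the new session-variable API is meant to be used from application code; the basic-auth constructor name, host, credentials and the APPLICATION variable are assumptions for illustration, not defined by this diff:

	package main

	import (
		"database/sql"
		"log"

		"github.com/SAP/go-hdb/driver"
	)

	func main() {
		// Placeholder connection details; NewBasicAuthConnector is assumed to
		// be the package's basic-auth constructor.
		connector := driver.NewBasicAuthConnector("hanahost:39013", "user", "password")

		// These variables are applied once on every new connection.
		if err := connector.SetSessionVariables(driver.SessionVariables{"APPLICATION": "my-app"}); err != nil {
			log.Fatal(err)
		}

		db := sql.OpenDB(connector)
		defer db.Close()

		if err := db.Ping(); err != nil {
			log.Fatal(err)
		}
	}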
@@ -33,7 +33,7 @@ import (
 )
 
 // DriverVersion is the version number of the hdb driver.
-const DriverVersion = "0.13.1"
+const DriverVersion = "0.14.1"
 
 // DriverName is the driver name to use with sql.Open for hdb databases.
 const DriverName = "hdb"
@@ -79,6 +79,7 @@ const (
 	pingQuery          = "select 1 from dummy"
 	isolationLevelStmt = "set transaction isolation level %s"
 	accessModeStmt     = "set transaction %s"
+	sessionVariable    = "set %s=%s"
 )
 
 // bulk statement
@@ -153,7 +154,23 @@ func newConn(ctx context.Context, c *Connector) (driver.Conn, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &conn{session: session}, nil
+	conn := &conn{session: session}
+	if err := conn.init(ctx, c.sessionVariables); err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func (c *conn) init(ctx context.Context, sv SessionVariables) error {
+	if sv == nil {
+		return nil
+	}
+	for k, v := range sv {
+		if _, err := c.ExecContext(ctx, fmt.Sprintf(sessionVariable, fmt.Sprintf("'%s'", k), fmt.Sprintf("'%s'", v)), nil); err != nil {
+			return err
+		}
+	}
+	return nil
 }
 
 func (c *conn) Prepare(query string) (driver.Stmt, error) {
@@ -303,6 +303,9 @@ func (w *Writer) WriteZeroes(cnt int) {
 			j = len(w.b)
 		}
 		n, _ := w.wr.Write(w.b[:j])
+		if n != j {
+			return
+		}
 		i += n
 	}
 }
|
@ -101,6 +101,12 @@ func (f *ParameterFieldSet) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *ParameterFieldSet) read(rd *bufio.Reader) {
|
func (f *ParameterFieldSet) read(rd *bufio.Reader) {
|
||||||
|
// This function is likely to be called multiple times on the same prepared
|
||||||
|
// statement (because HANA may send PARAMETERMETADATA parts even when
|
||||||
|
// executing the statement), so we need to empty these slices lest they keep
|
||||||
|
// growing.
|
||||||
|
f._inputFields = f._inputFields[:0]
|
||||||
|
f._outputFields = f._outputFields[:0]
|
||||||
for i := 0; i < len(f.fields); i++ {
|
for i := 0; i < len(f.fields); i++ {
|
||||||
field := newParameterField(f.names)
|
field := newParameterField(f.names)
|
||||||
field.read(rd)
|
field.read(rd)
|
||||||
|
|
|
@@ -931,8 +931,11 @@ func (s *Session) readReply(beforeRead beforeRead) error {
 		}
 		cnt := s.rd.Cnt()
 
-		if cnt != int(s.ph.bufferLength) {
-			outLogger.Printf("+++ partLenght: %d - not equal read byte amount: %d", s.ph.bufferLength, cnt)
+		switch {
+		case cnt < int(s.ph.bufferLength): // protocol buffer length > read bytes -> skip the unread bytes
+			s.rd.Skip(int(s.ph.bufferLength) - cnt)
+		case cnt > int(s.ph.bufferLength): // read bytes > protocol buffer length -> should never happen
+			return fmt.Errorf("protocol error: read bytes %d > buffer length %d", cnt, s.ph.bufferLength)
 		}
 
 		if i != lastPart { // not last part
vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go (generated, vendored, new file, +12 lines)
@@ -0,0 +1,12 @@
package credentials

type BearerTokenCredential struct {
	BearerToken string
}

// NewBearerTokenCredential returns a BearerTokenCredential object
func NewBearerTokenCredential(token string) *BearerTokenCredential {
	return &BearerTokenCredential{
		BearerToken: token,
	}
}