Merge pull request #6215 from hashicorp/f-upgrade-go-getter
upgrade go-getter, leave compiled protobuf at version 1.2
This commit is contained in:
commit
a1936e3add
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,513 @@
|
||||||
|
// Copyright 2014 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package metadata provides access to Google Compute Engine (GCE)
|
||||||
|
// metadata and API service accounts.
|
||||||
|
//
|
||||||
|
// This package is a wrapper around the GCE metadata service,
|
||||||
|
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||||
|
package metadata // import "cloud.google.com/go/compute/metadata"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// metadataIP is the documented metadata server IP address.
|
||||||
|
metadataIP = "169.254.169.254"
|
||||||
|
|
||||||
|
// metadataHostEnv is the environment variable specifying the
|
||||||
|
// GCE metadata hostname. If empty, the default value of
|
||||||
|
// metadataIP ("169.254.169.254") is used instead.
|
||||||
|
// This is variable name is not defined by any spec, as far as
|
||||||
|
// I know; it was made up for the Go package.
|
||||||
|
metadataHostEnv = "GCE_METADATA_HOST"
|
||||||
|
|
||||||
|
userAgent = "gcloud-golang/0.1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type cachedValue struct {
|
||||||
|
k string
|
||||||
|
trim bool
|
||||||
|
mu sync.Mutex
|
||||||
|
v string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||||
|
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||||
|
instID = &cachedValue{k: "instance/id", trim: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
ResponseHeaderTimeout: 2 * time.Second,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
subscribeClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NotDefinedError is returned when requested metadata is not defined.
|
||||||
|
//
|
||||||
|
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// This error is not returned if the value is defined to be the empty
|
||||||
|
// string.
|
||||||
|
type NotDefinedError string
|
||||||
|
|
||||||
|
func (suffix NotDefinedError) Error() string {
|
||||||
|
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.v != "" {
|
||||||
|
return c.v, nil
|
||||||
|
}
|
||||||
|
if c.trim {
|
||||||
|
v, err = cl.getTrimmed(c.k)
|
||||||
|
} else {
|
||||||
|
v, err = cl.Get(c.k)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
c.v = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
onGCEOnce sync.Once
|
||||||
|
onGCE bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||||
|
func OnGCE() bool {
|
||||||
|
onGCEOnce.Do(initOnGCE)
|
||||||
|
return onGCE
|
||||||
|
}
|
||||||
|
|
||||||
|
func initOnGCE() {
|
||||||
|
onGCE = testOnGCE()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOnGCE() bool {
|
||||||
|
// The user explicitly said they're on GCE, so trust them.
|
||||||
|
if os.Getenv(metadataHostEnv) != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
resc := make(chan bool, 2)
|
||||||
|
|
||||||
|
// Try two strategies in parallel.
|
||||||
|
// See https://github.com/googleapis/google-cloud-go/issues/194
|
||||||
|
go func() {
|
||||||
|
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := defaultClient.hc.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
addrs, err := net.LookupHost("metadata.google.internal")
|
||||||
|
if err != nil || len(addrs) == 0 {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resc <- strsContains(addrs, metadataIP)
|
||||||
|
}()
|
||||||
|
|
||||||
|
tryHarder := systemInfoSuggestsGCE()
|
||||||
|
if tryHarder {
|
||||||
|
res := <-resc
|
||||||
|
if res {
|
||||||
|
// The first strategy succeeded, so let's use it.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Wait for either the DNS or metadata server probe to
|
||||||
|
// contradict the other one and say we are running on
|
||||||
|
// GCE. Give it a lot of time to do so, since the system
|
||||||
|
// info already suggests we're running on a GCE BIOS.
|
||||||
|
timer := time.NewTimer(5 * time.Second)
|
||||||
|
defer timer.Stop()
|
||||||
|
select {
|
||||||
|
case res = <-resc:
|
||||||
|
return res
|
||||||
|
case <-timer.C:
|
||||||
|
// Too slow. Who knows what this system is.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// There's no hint from the system info that we're running on
|
||||||
|
// GCE, so use the first probe's result as truth, whether it's
|
||||||
|
// true or false. The goal here is to optimize for speed for
|
||||||
|
// users who are NOT running on GCE. We can't assume that
|
||||||
|
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||||
|
// address is fast. Worst case this should return when the
|
||||||
|
// metaClient's Transport.ResponseHeaderTimeout or
|
||||||
|
// Transport.Dial.Timeout fires (in two seconds).
|
||||||
|
return <-resc
|
||||||
|
}
|
||||||
|
|
||||||
|
// systemInfoSuggestsGCE reports whether the local system (without
|
||||||
|
// doing network requests) suggests that we're running on GCE. If this
|
||||||
|
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||||
|
// server.
|
||||||
|
func systemInfoSuggestsGCE() bool {
|
||||||
|
if runtime.GOOS != "linux" {
|
||||||
|
// We don't have any non-Linux clues available, at least yet.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||||
|
name := strings.TrimSpace(string(slurp))
|
||||||
|
return name == "Google" || name == "Google Compute Engine"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||||
|
// ResponseHeaderTimeout).
|
||||||
|
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||||
|
return subscribeClient.Subscribe(suffix, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get calls Client.Get on the default client.
|
||||||
|
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func Zone() (string, error) { return defaultClient.Zone() }
|
||||||
|
|
||||||
|
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
||||||
|
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
||||||
|
|
||||||
|
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
||||||
|
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
||||||
|
|
||||||
|
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
||||||
|
func InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.InstanceAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
||||||
|
func ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.ProjectAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes calls Client.Scopes on the default client.
|
||||||
|
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
||||||
|
|
||||||
|
func strsContains(ss []string, s string) bool {
|
||||||
|
for _, v := range ss {
|
||||||
|
if v == s {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Client provides metadata.
|
||||||
|
type Client struct {
|
||||||
|
hc *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
||||||
|
// will use the given http.Client instead of the default client.
|
||||||
|
func NewClient(c *http.Client) *Client {
|
||||||
|
return &Client{hc: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getETag returns a value from the metadata service as well as the associated ETag.
|
||||||
|
// This func is otherwise equivalent to Get.
|
||||||
|
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||||
|
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||||
|
// a container, which is an important use-case for local testing of cloud
|
||||||
|
// deployments. To enable spoofing of the metadata service, the environment
|
||||||
|
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||||
|
// requests shall go.
|
||||||
|
host := os.Getenv(metadataHostEnv)
|
||||||
|
if host == "" {
|
||||||
|
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||||
|
// binaries built with the "netgo" tag and without cgo won't
|
||||||
|
// know the search suffix for "metadata" is
|
||||||
|
// ".google.internal", and this IP address is documented as
|
||||||
|
// being stable anyway.
|
||||||
|
host = metadataIP
|
||||||
|
}
|
||||||
|
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||||
|
req, _ := http.NewRequest("GET", u, nil)
|
||||||
|
req.Header.Set("Metadata-Flavor", "Google")
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := c.hc.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return "", "", NotDefinedError(suffix)
|
||||||
|
}
|
||||||
|
all, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
if res.StatusCode != 200 {
|
||||||
|
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
|
||||||
|
}
|
||||||
|
return string(all), res.Header.Get("Etag"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a value from the metadata service.
|
||||||
|
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||||
|
// 169.254.169.254 will be used instead.
|
||||||
|
//
|
||||||
|
// If the requested metadata is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
func (c *Client) Get(suffix string) (string, error) {
|
||||||
|
val, _, err := c.getETag(suffix)
|
||||||
|
return val, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
||||||
|
s, err = c.Get(suffix)
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) lines(suffix string) ([]string, error) {
|
||||||
|
j, err := c.Get(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||||
|
for i := range s {
|
||||||
|
s[i] = strings.TrimSpace(s[i])
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func (c *Client) InternalIP() (string, error) {
|
||||||
|
return c.getTrimmed("instance/network-interfaces/0/ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func (c *Client) ExternalIP() (string, error) {
|
||||||
|
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func (c *Client) Hostname() (string, error) {
|
||||||
|
return c.getTrimmed("instance/hostname")
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func (c *Client) InstanceTags() ([]string, error) {
|
||||||
|
var s []string
|
||||||
|
j, err := c.Get("instance/tags")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func (c *Client) InstanceName() (string, error) {
|
||||||
|
host, err := c.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.Split(host, ".")[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func (c *Client) Zone() (string, error) {
|
||||||
|
zone, err := c.getTrimmed("instance/zone")
|
||||||
|
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceAttributes returns the list of user-defined attributes,
|
||||||
|
// assigned when initially creating a GCE VM instance. The value of an
|
||||||
|
// attribute can be obtained with InstanceAttributeValue.
|
||||||
|
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
||||||
|
|
||||||
|
// ProjectAttributes returns the list of user-defined attributes
|
||||||
|
// applying to the project as a whole, not just this VM. The value of
|
||||||
|
// an attribute can be obtained with ProjectAttributeValue.
|
||||||
|
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
||||||
|
|
||||||
|
// InstanceAttributeValue returns the value of the provided VM
|
||||||
|
// instance attribute.
|
||||||
|
//
|
||||||
|
// If the requested attribute is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
//
|
||||||
|
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||||
|
// defined to be the empty string.
|
||||||
|
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return c.Get("instance/attributes/" + attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue returns the value of the provided
|
||||||
|
// project attribute.
|
||||||
|
//
|
||||||
|
// If the requested attribute is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
//
|
||||||
|
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||||
|
// defined to be the empty string.
|
||||||
|
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return c.Get("project/attributes/" + attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes returns the service account scopes for the given account.
|
||||||
|
// The account may be empty or the string "default" to use the instance's
|
||||||
|
// main account.
|
||||||
|
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
||||||
|
if serviceAccount == "" {
|
||||||
|
serviceAccount = "default"
|
||||||
|
}
|
||||||
|
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe subscribes to a value from the metadata service.
|
||||||
|
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||||
|
// The suffix may contain query parameters.
|
||||||
|
//
|
||||||
|
// Subscribe calls fn with the latest metadata value indicated by the provided
|
||||||
|
// suffix. If the metadata value is deleted, fn is called with the empty string
|
||||||
|
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||||
|
// is deleted. Subscribe returns the error value returned from the last call to
|
||||||
|
// fn, which may be nil when ok == false.
|
||||||
|
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||||
|
const failedSubscribeSleep = time.Second * 5
|
||||||
|
|
||||||
|
// First check to see if the metadata value exists at all.
|
||||||
|
val, lastETag, err := c.getETag(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := fn(val, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := true
|
||||||
|
if strings.ContainsRune(suffix, '?') {
|
||||||
|
suffix += "&wait_for_change=true&last_etag="
|
||||||
|
} else {
|
||||||
|
suffix += "?wait_for_change=true&last_etag="
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
|
||||||
|
if err != nil {
|
||||||
|
if _, deleted := err.(NotDefinedError); !deleted {
|
||||||
|
time.Sleep(failedSubscribeSleep)
|
||||||
|
continue // Retry on other errors.
|
||||||
|
}
|
||||||
|
ok = false
|
||||||
|
}
|
||||||
|
lastETag = etag
|
||||||
|
|
||||||
|
if err := fn(val, ok); err != nil || !ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error contains an error response from the server.
|
||||||
|
type Error struct {
|
||||||
|
// Code is the HTTP response status code.
|
||||||
|
Code int
|
||||||
|
// Message is the server response message.
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
|
||||||
|
}
|
|
@ -0,0 +1,315 @@
|
||||||
|
// Copyright 2016 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package iam supports the resource-specific operations of Google Cloud
|
||||||
|
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
||||||
|
// See https://cloud.google.com/iam for more about IAM.
|
||||||
|
//
|
||||||
|
// Users of the Google Cloud Libraries will typically not use this package
|
||||||
|
// directly. Instead they will begin with some resource that supports IAM, like
|
||||||
|
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
||||||
|
package iam
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gax "github.com/googleapis/gax-go/v2"
|
||||||
|
pb "google.golang.org/genproto/googleapis/iam/v1"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// client abstracts the IAMPolicy API to allow multiple implementations.
|
||||||
|
type client interface {
|
||||||
|
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
||||||
|
Set(ctx context.Context, resource string, p *pb.Policy) error
|
||||||
|
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
||||||
|
type grpcClient struct {
|
||||||
|
c pb.IAMPolicyClient
|
||||||
|
}
|
||||||
|
|
||||||
|
var withRetry = gax.WithRetry(func() gax.Retryer {
|
||||||
|
return gax.OnCodes([]codes.Code{
|
||||||
|
codes.DeadlineExceeded,
|
||||||
|
codes.Unavailable,
|
||||||
|
}, gax.Backoff{
|
||||||
|
Initial: 100 * time.Millisecond,
|
||||||
|
Max: 60 * time.Second,
|
||||||
|
Multiplier: 1.3,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
||||||
|
var proto *pb.Policy
|
||||||
|
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
||||||
|
ctx = insertMetadata(ctx, md)
|
||||||
|
|
||||||
|
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||||
|
var err error
|
||||||
|
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
|
||||||
|
return err
|
||||||
|
}, withRetry)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
||||||
|
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
||||||
|
ctx = insertMetadata(ctx, md)
|
||||||
|
|
||||||
|
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||||
|
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||||
|
Resource: resource,
|
||||||
|
Policy: p,
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}, withRetry)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||||
|
var res *pb.TestIamPermissionsResponse
|
||||||
|
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
||||||
|
ctx = insertMetadata(ctx, md)
|
||||||
|
|
||||||
|
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||||
|
var err error
|
||||||
|
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||||
|
Resource: resource,
|
||||||
|
Permissions: perms,
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}, withRetry)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return res.Permissions, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Handle provides IAM operations for a resource.
|
||||||
|
type Handle struct {
|
||||||
|
c client
|
||||||
|
resource string
|
||||||
|
}
|
||||||
|
|
||||||
|
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
||||||
|
//
|
||||||
|
// InternalNewHandle returns a Handle for resource.
|
||||||
|
// The conn parameter refers to a server that must support the IAMPolicy service.
|
||||||
|
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
|
||||||
|
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
|
||||||
|
//
|
||||||
|
// InternalNewHandleClient returns a Handle for resource using the given
|
||||||
|
// grpc service that implements IAM as a mixin
|
||||||
|
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
|
||||||
|
return InternalNewHandleClient(&grpcClient{c: c}, resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
||||||
|
//
|
||||||
|
// InternalNewHandleClient returns a Handle for resource using the given
|
||||||
|
// client implementation.
|
||||||
|
func InternalNewHandleClient(c client, resource string) *Handle {
|
||||||
|
return &Handle{
|
||||||
|
c: c,
|
||||||
|
resource: resource,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Policy retrieves the IAM policy for the resource.
|
||||||
|
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
||||||
|
proto, err := h.c.Get(ctx, h.resource)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Policy{InternalProto: proto}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
||||||
|
//
|
||||||
|
// If policy was created from a prior call to Get, then the modification will
|
||||||
|
// only succeed if the policy has not changed since the Get.
|
||||||
|
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
||||||
|
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||||
|
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||||
|
return h.c.Test(ctx, h.resource, permissions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A RoleName is a name representing a collection of permissions.
|
||||||
|
type RoleName string
|
||||||
|
|
||||||
|
// Common role names.
|
||||||
|
const (
|
||||||
|
Owner RoleName = "roles/owner"
|
||||||
|
Editor RoleName = "roles/editor"
|
||||||
|
Viewer RoleName = "roles/viewer"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
||||||
|
AllUsers = "allUsers"
|
||||||
|
|
||||||
|
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
||||||
|
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Policy is a list of Bindings representing roles
|
||||||
|
// granted to members.
|
||||||
|
//
|
||||||
|
// The zero Policy is a valid policy with no bindings.
|
||||||
|
type Policy struct {
|
||||||
|
// TODO(jba): when type aliases are available, put Policy into an internal package
|
||||||
|
// and provide an exported alias here.
|
||||||
|
|
||||||
|
// This field is exported for use by the Google Cloud Libraries only.
|
||||||
|
// It may become unexported in a future release.
|
||||||
|
InternalProto *pb.Policy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Members returns the list of members with the supplied role.
|
||||||
|
// The return value should not be modified. Use Add and Remove
|
||||||
|
// to modify the members of a role.
|
||||||
|
func (p *Policy) Members(r RoleName) []string {
|
||||||
|
b := p.binding(r)
|
||||||
|
if b == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return b.Members
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasRole reports whether member has role r.
|
||||||
|
func (p *Policy) HasRole(member string, r RoleName) bool {
|
||||||
|
return memberIndex(member, p.binding(r)) >= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds member member to role r if it is not already present.
|
||||||
|
// A new binding is created if there is no binding for the role.
|
||||||
|
func (p *Policy) Add(member string, r RoleName) {
|
||||||
|
b := p.binding(r)
|
||||||
|
if b == nil {
|
||||||
|
if p.InternalProto == nil {
|
||||||
|
p.InternalProto = &pb.Policy{}
|
||||||
|
}
|
||||||
|
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
||||||
|
Role: string(r),
|
||||||
|
Members: []string{member},
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if memberIndex(member, b) < 0 {
|
||||||
|
b.Members = append(b.Members, member)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes member from role r if it is present.
|
||||||
|
func (p *Policy) Remove(member string, r RoleName) {
|
||||||
|
bi := p.bindingIndex(r)
|
||||||
|
if bi < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
bindings := p.InternalProto.Bindings
|
||||||
|
b := bindings[bi]
|
||||||
|
mi := memberIndex(member, b)
|
||||||
|
if mi < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Order doesn't matter for bindings or members, so to remove, move the last item
|
||||||
|
// into the removed spot and shrink the slice.
|
||||||
|
if len(b.Members) == 1 {
|
||||||
|
// Remove binding.
|
||||||
|
last := len(bindings) - 1
|
||||||
|
bindings[bi] = bindings[last]
|
||||||
|
bindings[last] = nil
|
||||||
|
p.InternalProto.Bindings = bindings[:last]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Remove member.
|
||||||
|
// TODO(jba): worry about multiple copies of m?
|
||||||
|
last := len(b.Members) - 1
|
||||||
|
b.Members[mi] = b.Members[last]
|
||||||
|
b.Members[last] = ""
|
||||||
|
b.Members = b.Members[:last]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Roles returns the names of all the roles that appear in the Policy.
|
||||||
|
func (p *Policy) Roles() []RoleName {
|
||||||
|
if p.InternalProto == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var rns []RoleName
|
||||||
|
for _, b := range p.InternalProto.Bindings {
|
||||||
|
rns = append(rns, RoleName(b.Role))
|
||||||
|
}
|
||||||
|
return rns
|
||||||
|
}
|
||||||
|
|
||||||
|
// binding returns the Binding for the suppied role, or nil if there isn't one.
|
||||||
|
func (p *Policy) binding(r RoleName) *pb.Binding {
|
||||||
|
i := p.bindingIndex(r)
|
||||||
|
if i < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return p.InternalProto.Bindings[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Policy) bindingIndex(r RoleName) int {
|
||||||
|
if p.InternalProto == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
for i, b := range p.InternalProto.Bindings {
|
||||||
|
if b.Role == string(r) {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
||||||
|
func memberIndex(m string, b *pb.Binding) int {
|
||||||
|
if b == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
for i, mm := range b.Members {
|
||||||
|
if mm == m {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertMetadata inserts metadata into the given context
|
||||||
|
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||||
|
out, _ := metadata.FromOutgoingContext(ctx)
|
||||||
|
out = out.Copy()
|
||||||
|
for _, md := range mds {
|
||||||
|
for k, v := range md {
|
||||||
|
out[k] = append(out[k], v...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return metadata.NewOutgoingContext(ctx, out)
|
||||||
|
}
|
|
@ -0,0 +1,54 @@
|
||||||
|
// Copyright 2017 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Annotate prepends msg to the error message in err, attempting
|
||||||
|
// to preserve other information in err, like an error code.
|
||||||
|
//
|
||||||
|
// Annotate panics if err is nil.
|
||||||
|
//
|
||||||
|
// Annotate knows about these error types:
|
||||||
|
// - "google.golang.org/grpc/status".Status
|
||||||
|
// - "google.golang.org/api/googleapi".Error
|
||||||
|
// If the error is not one of these types, Annotate behaves
|
||||||
|
// like
|
||||||
|
// fmt.Errorf("%s: %v", msg, err)
|
||||||
|
func Annotate(err error, msg string) error {
|
||||||
|
if err == nil {
|
||||||
|
panic("Annotate called with nil")
|
||||||
|
}
|
||||||
|
if s, ok := status.FromError(err); ok {
|
||||||
|
p := s.Proto()
|
||||||
|
p.Message = msg + ": " + p.Message
|
||||||
|
return status.ErrorProto(p)
|
||||||
|
}
|
||||||
|
if g, ok := err.(*googleapi.Error); ok {
|
||||||
|
g.Message = msg + ": " + g.Message
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
return fmt.Errorf("%s: %v", msg, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Annotatef uses format and args to format a string, then calls Annotate.
|
||||||
|
func Annotatef(err error, format string, args ...interface{}) error {
|
||||||
|
return Annotate(err, fmt.Sprintf(format, args...))
|
||||||
|
}
|
|
@ -0,0 +1,108 @@
|
||||||
|
// Copyright 2016 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package optional provides versions of primitive types that can
|
||||||
|
// be nil. These are useful in methods that update some of an API object's
|
||||||
|
// fields.
|
||||||
|
package optional
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Bool is either a bool or nil.
|
||||||
|
Bool interface{}
|
||||||
|
|
||||||
|
// String is either a string or nil.
|
||||||
|
String interface{}
|
||||||
|
|
||||||
|
// Int is either an int or nil.
|
||||||
|
Int interface{}
|
||||||
|
|
||||||
|
// Uint is either a uint or nil.
|
||||||
|
Uint interface{}
|
||||||
|
|
||||||
|
// Float64 is either a float64 or nil.
|
||||||
|
Float64 interface{}
|
||||||
|
|
||||||
|
// Duration is either a time.Duration or nil.
|
||||||
|
Duration interface{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// ToBool returns its argument as a bool.
|
||||||
|
// It panics if its argument is nil or not a bool.
|
||||||
|
func ToBool(v Bool) bool {
|
||||||
|
x, ok := v.(bool)
|
||||||
|
if !ok {
|
||||||
|
doPanic("Bool", v)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToString returns its argument as a string.
|
||||||
|
// It panics if its argument is nil or not a string.
|
||||||
|
func ToString(v String) string {
|
||||||
|
x, ok := v.(string)
|
||||||
|
if !ok {
|
||||||
|
doPanic("String", v)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToInt returns its argument as an int.
|
||||||
|
// It panics if its argument is nil or not an int.
|
||||||
|
func ToInt(v Int) int {
|
||||||
|
x, ok := v.(int)
|
||||||
|
if !ok {
|
||||||
|
doPanic("Int", v)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToUint returns its argument as a uint.
|
||||||
|
// It panics if its argument is nil or not a uint.
|
||||||
|
func ToUint(v Uint) uint {
|
||||||
|
x, ok := v.(uint)
|
||||||
|
if !ok {
|
||||||
|
doPanic("Uint", v)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToFloat64 returns its argument as a float64.
|
||||||
|
// It panics if its argument is nil or not a float64.
|
||||||
|
func ToFloat64(v Float64) float64 {
|
||||||
|
x, ok := v.(float64)
|
||||||
|
if !ok {
|
||||||
|
doPanic("Float64", v)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToDuration returns its argument as a time.Duration.
|
||||||
|
// It panics if its argument is nil or not a time.Duration.
|
||||||
|
func ToDuration(v Duration) time.Duration {
|
||||||
|
x, ok := v.(time.Duration)
|
||||||
|
if !ok {
|
||||||
|
doPanic("Duration", v)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
func doPanic(capType string, v interface{}) {
|
||||||
|
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
|
||||||
|
}
|
|
@ -0,0 +1,54 @@
|
||||||
|
// Copyright 2016 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gax "github.com/googleapis/gax-go/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Retry calls the supplied function f repeatedly according to the provided
|
||||||
|
// backoff parameters. It returns when one of the following occurs:
|
||||||
|
// When f's first return value is true, Retry immediately returns with f's second
|
||||||
|
// return value.
|
||||||
|
// When the provided context is done, Retry returns with an error that
|
||||||
|
// includes both ctx.Error() and the last error returned by f.
|
||||||
|
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
|
||||||
|
return retry(ctx, bo, f, gax.Sleep)
|
||||||
|
}
|
||||||
|
|
||||||
|
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
|
||||||
|
sleep func(context.Context, time.Duration) error) error {
|
||||||
|
var lastErr error
|
||||||
|
for {
|
||||||
|
stop, err := f()
|
||||||
|
if stop {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Remember the last "real" error from f.
|
||||||
|
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
|
p := bo.Pause()
|
||||||
|
if cerr := sleep(ctx, p); cerr != nil {
|
||||||
|
if lastErr != nil {
|
||||||
|
return Annotatef(lastErr, "retry failed with %v; last error", cerr)
|
||||||
|
}
|
||||||
|
return cerr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,109 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package trace

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
	"google.golang.org/api/googleapi"
	"google.golang.org/genproto/googleapis/rpc/code"
	"google.golang.org/grpc/status"
)

// StartSpan adds a span to the trace with the given name.
func StartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}

// EndSpan ends a span with the given error.
func EndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		span.SetStatus(toStatus(err))
	}
	span.End()
}

// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
	if err2, ok := err.(*googleapi.Error); ok {
		return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
	} else if s, ok := status.FromError(err); ok {
		return trace.Status{Code: int32(s.Code()), Message: s.Message()}
	} else {
		return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
	}
}

// TODO(deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
	switch httpStatusCode {
	case 200:
		return int32(code.Code_OK)
	case 499:
		return int32(code.Code_CANCELLED)
	case 500:
		return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
	case 400:
		return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
	case 504:
		return int32(code.Code_DEADLINE_EXCEEDED)
	case 404:
		return int32(code.Code_NOT_FOUND)
	case 409:
		return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
	case 403:
		return int32(code.Code_PERMISSION_DENIED)
	case 401:
		return int32(code.Code_UNAUTHENTICATED)
	case 429:
		return int32(code.Code_RESOURCE_EXHAUSTED)
	case 501:
		return int32(code.Code_UNIMPLEMENTED)
	case 503:
		return int32(code.Code_UNAVAILABLE)
	default:
		return int32(code.Code_UNKNOWN)
	}
}

// TODO: (odeke-em): perhaps just pass around spans due to the cost
// incurred from using trace.FromContext(ctx) yet we could avoid
// throwing away the work done by ctx, span := trace.StartSpan.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
	var attrs []trace.Attribute
	for k, v := range attrMap {
		var a trace.Attribute
		switch v := v.(type) {
		case string:
			a = trace.StringAttribute(k, v)
		case bool:
			a = trace.BoolAttribute(k, v)
		case int:
			a = trace.Int64Attribute(k, int64(v))
		case int64:
			a = trace.Int64Attribute(k, v)
		default:
			a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
		}
		attrs = append(attrs, a)
	}
	trace.FromContext(ctx).Annotatef(attrs, format, args...)
}

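A rough sketch of how the rest of the repository is expected to use these helpers (the function name and attributes are illustrative; the pattern mirrors the `StartSpan`/`EndSpan` usage visible in the storage files later in this diff):

```go
package example

import (
	"context"

	"cloud.google.com/go/internal/trace"
)

func fetchObject(ctx context.Context, name string) (err error) {
	// Open a span, and close it with whatever error the function returns.
	ctx = trace.StartSpan(ctx, "example.fetchObject")
	defer func() { trace.EndSpan(ctx, err) }()

	// Attach structured attributes to the current span.
	trace.TracePrintf(ctx, map[string]interface{}{"name": name}, "fetching %s", name)

	// ... perform the RPC ...
	return nil
}
```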
@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

today=$(date +%Y%m%d)

sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE

@ -0,0 +1,71 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate ./update_version.sh

// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version

import (
	"runtime"
	"strings"
	"unicode"
)

// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20190802"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}

var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:]
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return ""
}

func notSemverRune(r rune) bool {
	return !strings.ContainsRune("0123456789.", r)
}

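A small, hypothetical test-style sketch of what `goVer` is expected to produce for a few runtime strings; the expected values are inferred from the code above, not taken from the repository's own tests:

```go
package version

import "testing"

func TestGoVerSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{"go1.12", "1.12.0"},                  // missing patch component gets ".0"
		{"go1.12.7", "1.12.7"},                // already three components, unchanged
		{"go1.13beta1", "1.13.0-beta1"},       // prerelease split off and re-appended
		{"devel +abc123 Tue Jan 1", "abc123"}, // devel builds keep just the token after "devel +"
	}
	for _, c := range cases {
		if got := goVer(c.in); got != c.want {
			t.Errorf("goVer(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}
```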
@ -0,0 +1,32 @@
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)

- [About Cloud Storage](https://cloud.google.com/storage/)
- [API documentation](https://cloud.google.com/storage/docs)
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)

### Example Usage

First create a `storage.Client` to use throughout your application:

[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
	log.Fatal(err)
}
```

[snip]:# (storage-2)
```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
	log.Fatal(err)
}
```

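For symmetry with the read example, a minimal write sketch using the same `client` and `ctx` (the bucket and object names are illustrative, as in the snippets above):

```go
// Write a string to object1 in bucket, creating or overwriting it.
wc := client.Bucket("bucket").Object("object1").NewWriter(ctx)
if _, err := fmt.Fprint(wc, "hello world"); err != nil {
	log.Fatal(err)
}
if err := wc.Close(); err != nil {
	log.Fatal(err)
}
```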
@ -0,0 +1,335 @@
|
||||||
|
// Copyright 2014 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"cloud.google.com/go/internal/trace"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
raw "google.golang.org/api/storage/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ACLRole is the level of access to grant.
|
||||||
|
type ACLRole string
|
||||||
|
|
||||||
|
const (
|
||||||
|
RoleOwner ACLRole = "OWNER"
|
||||||
|
RoleReader ACLRole = "READER"
|
||||||
|
RoleWriter ACLRole = "WRITER"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ACLEntity refers to a user or group.
|
||||||
|
// They are sometimes referred to as grantees.
|
||||||
|
//
|
||||||
|
// It could be in the form of:
|
||||||
|
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
|
||||||
|
// "domain-<domain>" and "project-team-<projectId>".
|
||||||
|
//
|
||||||
|
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
|
||||||
|
type ACLEntity string
|
||||||
|
|
||||||
|
const (
|
||||||
|
AllUsers ACLEntity = "allUsers"
|
||||||
|
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ACLRule represents a grant for a role to an entity (user, group or team) for a
|
||||||
|
// Google Cloud Storage object or bucket.
|
||||||
|
type ACLRule struct {
|
||||||
|
Entity ACLEntity
|
||||||
|
EntityID string
|
||||||
|
Role ACLRole
|
||||||
|
Domain string
|
||||||
|
Email string
|
||||||
|
ProjectTeam *ProjectTeam
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectTeam is the project team associated with the entity, if any.
|
||||||
|
type ProjectTeam struct {
|
||||||
|
ProjectNumber string
|
||||||
|
Team string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
||||||
|
type ACLHandle struct {
|
||||||
|
c *Client
|
||||||
|
bucket string
|
||||||
|
object string
|
||||||
|
isDefault bool
|
||||||
|
userProject string // for requester-pays buckets
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete permanently deletes the ACL entry for the given entity.
|
||||||
|
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
if a.object != "" {
|
||||||
|
return a.objectDelete(ctx, entity)
|
||||||
|
}
|
||||||
|
if a.isDefault {
|
||||||
|
return a.bucketDefaultDelete(ctx, entity)
|
||||||
|
}
|
||||||
|
return a.bucketDelete(ctx, entity)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the role for the given entity.
|
||||||
|
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
if a.object != "" {
|
||||||
|
return a.objectSet(ctx, entity, role, false)
|
||||||
|
}
|
||||||
|
if a.isDefault {
|
||||||
|
return a.objectSet(ctx, entity, role, true)
|
||||||
|
}
|
||||||
|
return a.bucketSet(ctx, entity, role)
|
||||||
|
}
|
||||||
|
|
||||||
|
// List retrieves ACL entries.
|
||||||
|
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
if a.object != "" {
|
||||||
|
return a.objectList(ctx)
|
||||||
|
}
|
||||||
|
if a.isDefault {
|
||||||
|
return a.bucketDefaultList(ctx)
|
||||||
|
}
|
||||||
|
return a.bucketList(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||||
|
var acls *raw.ObjectAccessControls
|
||||||
|
var err error
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
acls, err = req.Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return toObjectACLRules(acls.Items), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||||
|
return runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
return req.Do()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||||
|
var acls *raw.BucketAccessControls
|
||||||
|
var err error
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
acls, err = req.Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return toBucketACLRules(acls.Items), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||||
|
acl := &raw.BucketAccessControl{
|
||||||
|
Bucket: a.bucket,
|
||||||
|
Entity: string(entity),
|
||||||
|
Role: string(role),
|
||||||
|
}
|
||||||
|
err := runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
_, err := req.Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||||
|
return runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
return req.Do()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||||
|
var acls *raw.ObjectAccessControls
|
||||||
|
var err error
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
acls, err = req.Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return toObjectACLRules(acls.Items), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||||
|
type setRequest interface {
|
||||||
|
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||||
|
Header() http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
acl := &raw.ObjectAccessControl{
|
||||||
|
Bucket: a.bucket,
|
||||||
|
Entity: string(entity),
|
||||||
|
Role: string(role),
|
||||||
|
}
|
||||||
|
var req setRequest
|
||||||
|
if isBucketDefault {
|
||||||
|
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||||
|
} else {
|
||||||
|
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||||
|
}
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
return runWithRetry(ctx, func() error {
|
||||||
|
_, err := req.Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||||
|
return runWithRetry(ctx, func() error {
|
||||||
|
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
||||||
|
a.configureCall(ctx, req)
|
||||||
|
return req.Do()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
|
||||||
|
vc := reflect.ValueOf(call)
|
||||||
|
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
|
||||||
|
if a.userProject != "" {
|
||||||
|
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
|
||||||
|
}
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
}
|
||||||
|
|
||||||
|
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
||||||
|
var rs []ACLRule
|
||||||
|
for _, item := range items {
|
||||||
|
rs = append(rs, toObjectACLRule(item))
|
||||||
|
}
|
||||||
|
return rs
|
||||||
|
}
|
||||||
|
|
||||||
|
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
|
||||||
|
var rs []ACLRule
|
||||||
|
for _, item := range items {
|
||||||
|
rs = append(rs, toBucketACLRule(item))
|
||||||
|
}
|
||||||
|
return rs
|
||||||
|
}
|
||||||
|
|
||||||
|
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
|
||||||
|
return ACLRule{
|
||||||
|
Entity: ACLEntity(a.Entity),
|
||||||
|
EntityID: a.EntityId,
|
||||||
|
Role: ACLRole(a.Role),
|
||||||
|
Domain: a.Domain,
|
||||||
|
Email: a.Email,
|
||||||
|
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
|
||||||
|
return ACLRule{
|
||||||
|
Entity: ACLEntity(a.Entity),
|
||||||
|
EntityID: a.EntityId,
|
||||||
|
Role: ACLRole(a.Role),
|
||||||
|
Domain: a.Domain,
|
||||||
|
Email: a.Email,
|
||||||
|
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
|
||||||
|
if len(rules) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
r := make([]*raw.ObjectAccessControl, 0, len(rules))
|
||||||
|
for _, rule := range rules {
|
||||||
|
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
|
||||||
|
if len(rules) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
r := make([]*raw.BucketAccessControl, 0, len(rules))
|
||||||
|
for _, rule := range rules {
|
||||||
|
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
|
||||||
|
return &raw.BucketAccessControl{
|
||||||
|
Bucket: bucket,
|
||||||
|
Entity: string(r.Entity),
|
||||||
|
Role: string(r.Role),
|
||||||
|
// The other fields are not settable.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
|
||||||
|
return &raw.ObjectAccessControl{
|
||||||
|
Bucket: bucket,
|
||||||
|
Entity: string(r.Entity),
|
||||||
|
Role: string(r.Role),
|
||||||
|
// The other fields are not settable.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
|
||||||
|
if p == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &ProjectTeam{
|
||||||
|
ProjectNumber: p.ProjectNumber,
|
||||||
|
Team: p.Team,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
|
||||||
|
if p == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &ProjectTeam{
|
||||||
|
ProjectNumber: p.ProjectNumber,
|
||||||
|
Team: p.Team,
|
||||||
|
}
|
||||||
|
}
|
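A brief sketch of how the ACL handle defined above might be used from application code; the client, bucket, and object names are illustrative placeholders:

```go
ctx := context.Background()
acl := client.Bucket("bucket").Object("object1").ACL()

// Grant read access to all users, then list the resulting rules.
if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
	log.Fatal(err)
}
rules, err := acl.List(ctx)
if err != nil {
	log.Fatal(err)
}
for _, r := range rules {
	fmt.Printf("%s has role %s\n", r.Entity, r.Role)
}
```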
File diff suppressed because it is too large
|
@ -0,0 +1,228 @@
|
||||||
|
// Copyright 2016 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"cloud.google.com/go/internal/trace"
|
||||||
|
raw "google.golang.org/api/storage/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CopierFrom creates a Copier that can copy src to dst.
|
||||||
|
// You can immediately call Run on the returned Copier, or
|
||||||
|
// you can configure it first.
|
||||||
|
//
|
||||||
|
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
||||||
|
// in which case the user project of src is billed.
|
||||||
|
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
||||||
|
return &Copier{dst: dst, src: src}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Copier copies a source object to a destination.
|
||||||
|
type Copier struct {
|
||||||
|
// ObjectAttrs are optional attributes to set on the destination object.
|
||||||
|
// Any attributes must be initialized before any calls on the Copier. Nil
|
||||||
|
// or zero-valued attributes are ignored.
|
||||||
|
ObjectAttrs
|
||||||
|
|
||||||
|
// RewriteToken can be set before calling Run to resume a copy
|
||||||
|
// operation. After Run returns a non-nil error, RewriteToken will
|
||||||
|
// have been updated to contain the value needed to resume the copy.
|
||||||
|
RewriteToken string
|
||||||
|
|
||||||
|
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
|
||||||
|
// operation. If ProgressFunc is not nil and copying requires multiple
|
||||||
|
// calls to the underlying service (see
|
||||||
|
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
|
||||||
|
// ProgressFunc will be invoked after each call with the number of bytes of
|
||||||
|
// content copied so far and the total size in bytes of the source object.
|
||||||
|
//
|
||||||
|
// ProgressFunc is intended to make upload progress available to the
|
||||||
|
// application. For example, the implementation of ProgressFunc may update
|
||||||
|
// a progress bar in the application's UI, or log the result of
|
||||||
|
// float64(copiedBytes)/float64(totalBytes).
|
||||||
|
//
|
||||||
|
// ProgressFunc should return quickly without blocking.
|
||||||
|
ProgressFunc func(copiedBytes, totalBytes uint64)
|
||||||
|
|
||||||
|
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
|
||||||
|
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
|
||||||
|
// any.
|
||||||
|
//
|
||||||
|
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
|
||||||
|
// (via ObjectHandle.Key) on the destination object will result in an error when
|
||||||
|
// Run is called.
|
||||||
|
DestinationKMSKeyName string
|
||||||
|
|
||||||
|
dst, src *ObjectHandle
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run performs the copy.
|
||||||
|
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
if err := c.src.validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := c.dst.validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
||||||
|
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
||||||
|
}
|
||||||
|
// Convert destination attributes to raw form, omitting the bucket.
|
||||||
|
// If the bucket is included but name or content-type aren't, the service
|
||||||
|
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||||
|
// does not cause any problems.
|
||||||
|
rawObject := c.ObjectAttrs.toRawObject("")
|
||||||
|
for {
|
||||||
|
res, err := c.callRewrite(ctx, rawObject)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if c.ProgressFunc != nil {
|
||||||
|
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
||||||
|
}
|
||||||
|
if res.Done { // Finished successfully.
|
||||||
|
return newObject(res.Resource), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||||
|
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||||
|
|
||||||
|
call.Context(ctx).Projection("full")
|
||||||
|
if c.RewriteToken != "" {
|
||||||
|
call.RewriteToken(c.RewriteToken)
|
||||||
|
}
|
||||||
|
if c.DestinationKMSKeyName != "" {
|
||||||
|
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
|
||||||
|
}
|
||||||
|
if c.PredefinedACL != "" {
|
||||||
|
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||||
|
}
|
||||||
|
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if c.dst.userProject != "" {
|
||||||
|
call.UserProject(c.dst.userProject)
|
||||||
|
} else if c.src.userProject != "" {
|
||||||
|
call.UserProject(c.src.userProject)
|
||||||
|
}
|
||||||
|
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var res *raw.RewriteResponse
|
||||||
|
var err error
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c.RewriteToken = res.RewriteToken
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||||
|
// You can immediately call Run on the returned Composer, or you can
|
||||||
|
// configure it first.
|
||||||
|
//
|
||||||
|
// The encryption key for the destination object will be used to decrypt all
|
||||||
|
// source objects and encrypt the destination object. It is an error
|
||||||
|
// to specify an encryption key for any of the source objects.
|
||||||
|
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
||||||
|
return &Composer{dst: dst, srcs: srcs}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Composer composes source objects into a destination object.
|
||||||
|
//
|
||||||
|
// For Requester Pays buckets, the user project of dst is billed.
|
||||||
|
type Composer struct {
|
||||||
|
// ObjectAttrs are optional attributes to set on the destination object.
|
||||||
|
// Any attributes must be initialized before any calls on the Composer. Nil
|
||||||
|
// or zero-valued attributes are ignored.
|
||||||
|
ObjectAttrs
|
||||||
|
|
||||||
|
dst *ObjectHandle
|
||||||
|
srcs []*ObjectHandle
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run performs the compose operation.
|
||||||
|
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
if err := c.dst.validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(c.srcs) == 0 {
|
||||||
|
return nil, errors.New("storage: at least one source object must be specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &raw.ComposeRequest{}
|
||||||
|
// Compose requires a non-empty Destination, so we always set it,
|
||||||
|
// even if the caller-provided ObjectAttrs is the zero value.
|
||||||
|
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||||
|
for _, src := range c.srcs {
|
||||||
|
if err := src.validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if src.bucket != c.dst.bucket {
|
||||||
|
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
||||||
|
}
|
||||||
|
if src.encryptionKey != nil {
|
||||||
|
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||||
|
}
|
||||||
|
srcObj := &raw.ComposeRequestSourceObjects{
|
||||||
|
Name: src.object,
|
||||||
|
}
|
||||||
|
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||||
|
}
|
||||||
|
|
||||||
|
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||||
|
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if c.dst.userProject != "" {
|
||||||
|
call.UserProject(c.dst.userProject)
|
||||||
|
}
|
||||||
|
if c.PredefinedACL != "" {
|
||||||
|
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||||
|
}
|
||||||
|
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var obj *raw.Object
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newObject(obj), nil
|
||||||
|
}
|
|
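A sketch of driving the Copier described above, including the optional progress callback; the client, bucket, and object names are assumed placeholders:

```go
src := client.Bucket("src-bucket").Object("source")
dst := client.Bucket("dst-bucket").Object("copy")

copier := dst.CopierFrom(src)
copier.ContentType = "text/plain" // optional destination attribute
copier.ProgressFunc = func(copiedBytes, totalBytes uint64) {
	fmt.Printf("copied %d of %d bytes\n", copiedBytes, totalBytes)
}
attrs, err := copier.Run(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("new object generation:", attrs.Generation)
```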
@ -0,0 +1,176 @@
|
||||||
|
// Copyright 2016 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package storage provides an easy way to work with Google Cloud Storage.
|
||||||
|
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
||||||
|
|
||||||
|
More information about Google Cloud Storage is available at
|
||||||
|
https://cloud.google.com/storage/docs.
|
||||||
|
|
||||||
|
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
||||||
|
connection pooling and similar aspects of this package.
|
||||||
|
|
||||||
|
All of the methods of this package use exponential backoff to retry calls that fail
|
||||||
|
with certain errors, as described in
|
||||||
|
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
|
||||||
|
indefinitely unless the controlling context is canceled or the client is closed. See
|
||||||
|
context.WithTimeout and context.WithCancel.
|
||||||
|
|
||||||
|
|
||||||
|
Creating a Client
|
||||||
|
|
||||||
|
To start working with this package, create a client:
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
client, err := storage.NewClient(ctx)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
|
||||||
|
The client will use your default application credentials.
|
||||||
|
|
||||||
|
If you only wish to access public data, you can create
|
||||||
|
an unauthenticated client with
|
||||||
|
|
||||||
|
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||||
|
|
||||||
|
Buckets
|
||||||
|
|
||||||
|
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||||
|
bucket, make a bucket handle:
|
||||||
|
|
||||||
|
bkt := client.Bucket(bucketName)
|
||||||
|
|
||||||
|
A handle is a reference to a bucket. You can have a handle even if the
|
||||||
|
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||||
|
call Create on the handle:
|
||||||
|
|
||||||
|
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
|
||||||
|
Note that although buckets are associated with projects, bucket names are
|
||||||
|
global across all projects.
|
||||||
|
|
||||||
|
Each bucket has associated metadata, represented in this package by
|
||||||
|
BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
||||||
|
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
||||||
|
Attrs:
|
||||||
|
|
||||||
|
attrs, err := bkt.Attrs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||||
|
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||||
|
|
||||||
|
Objects
|
||||||
|
|
||||||
|
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||||
|
refer to objects using a handle, just as with buckets, but unlike buckets
|
||||||
|
you don't explicitly create an object. Instead, the first time you write
|
||||||
|
to an object it will be created. You can use the standard Go io.Reader
|
||||||
|
and io.Writer interfaces to read and write object data:
|
||||||
|
|
||||||
|
obj := bkt.Object("data")
|
||||||
|
// Write something to obj.
|
||||||
|
// w implements io.Writer.
|
||||||
|
w := obj.NewWriter(ctx)
|
||||||
|
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||||
|
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
// Close, just like writing a file.
|
||||||
|
if err := w.Close(); err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read it back.
|
||||||
|
r, err := obj.NewReader(ctx)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
// Prints "This object contains text."
|
||||||
|
|
||||||
|
Objects also have attributes, which you can fetch with Attrs:
|
||||||
|
|
||||||
|
objAttrs, err := obj.Attrs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||||
|
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||||
|
|
||||||
|
ACLs
|
||||||
|
|
||||||
|
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||||
|
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||||
|
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||||
|
access at the project level (see
|
||||||
|
https://cloud.google.com/storage/docs/access-control/iam).
|
||||||
|
|
||||||
|
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||||
|
|
||||||
|
acls, err := obj.ACL().List(ctx)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
for _, rule := range acls {
|
||||||
|
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||||
|
}
|
||||||
|
|
||||||
|
You can also set and delete ACLs.
|
||||||
|
|
||||||
|
Conditions
|
||||||
|
|
||||||
|
Every object has a generation and a metageneration. The generation changes
|
||||||
|
whenever the content changes, and the metageneration changes whenever the
|
||||||
|
metadata changes. Conditions let you check these values before an operation;
|
||||||
|
the operation only executes if the conditions match. You can use conditions to
|
||||||
|
prevent race conditions in read-modify-write operations.
|
||||||
|
|
||||||
|
For example, say you've read an object's metadata into objAttrs. Now
|
||||||
|
you want to write to that object, but only if its contents haven't changed
|
||||||
|
since you read it. Here is how to express that:
|
||||||
|
|
||||||
|
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||||
|
// Proceed with writing as above.
|
||||||
|
|
||||||
|
Signed URLs
|
||||||
|
|
||||||
|
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||||
|
You don't need to create a client to do this. See the documentation of
|
||||||
|
SignedURL for details.
|
||||||
|
|
||||||
|
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Handle error.
|
||||||
|
}
|
||||||
|
fmt.Println(url)
|
||||||
|
|
||||||
|
Errors
|
||||||
|
|
||||||
|
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
|
||||||
|
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
|
||||||
|
|
||||||
|
if e, ok := err.(*googleapi.Error); ok {
|
||||||
|
if e.Code == 409 { ... }
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
package storage // import "cloud.google.com/go/storage"
|
|
@ -0,0 +1,32 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.10

package storage

import "google.golang.org/api/googleapi"

func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case interface{ Temporary() bool }:
		return e.Temporary()
	default:
		return false
	}
}

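The `Temporary() bool` case above means any error type that advertises itself as temporary is retried. A minimal illustrative error type (hypothetical, not part of the package):

```go
package storage

// tempError is a hypothetical error used only to illustrate the
// Temporary() branch of shouldRetry.
type tempError struct{ msg string }

func (e tempError) Error() string   { return e.msg }
func (e tempError) Temporary() bool { return true }

// shouldRetry(tempError{"transient"}) reports true, so runWithRetry
// would attempt the wrapped call again after backing off.
```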
@ -0,0 +1,330 @@
|
||||||
|
// Copyright 2019 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/api/iterator"
|
||||||
|
raw "google.golang.org/api/storage/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HMACState is the state of the HMAC key.
|
||||||
|
type HMACState string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Active is the status for an active key that can be used to sign
|
||||||
|
// requests.
|
||||||
|
Active HMACState = "ACTIVE"
|
||||||
|
|
||||||
|
// Inactive is the status for an inactive key thus requests signed by
|
||||||
|
// this key will be denied.
|
||||||
|
Inactive HMACState = "INACTIVE"
|
||||||
|
|
||||||
|
// Deleted is the status for a key that is deleted.
|
||||||
|
// Once in this state the key cannot be recovered
|
||||||
|
// and does not count towards key limits. Deleted keys will be cleaned
|
||||||
|
// up later.
|
||||||
|
Deleted HMACState = "DELETED"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HMACKey is the representation of a Google Cloud Storage HMAC key.
|
||||||
|
//
|
||||||
|
// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
|
||||||
|
// authentication, please visit https://cloud.google.com/storage/docs/migrating.
|
||||||
|
//
|
||||||
|
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
type HMACKey struct {
|
||||||
|
// The HMAC's secret key.
|
||||||
|
Secret string
|
||||||
|
|
||||||
|
// AccessID is the ID of the HMAC key.
|
||||||
|
AccessID string
|
||||||
|
|
||||||
|
// Etag is the HTTP/1.1 Entity tag.
|
||||||
|
Etag string
|
||||||
|
|
||||||
|
// ID is the ID of the HMAC key, including the ProjectID and AccessID.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// ProjectID is the ID of the project that owns the
|
||||||
|
// service account to which the key authenticates.
|
||||||
|
ProjectID string
|
||||||
|
|
||||||
|
// ServiceAccountEmail is the email address
|
||||||
|
// of the key's associated service account.
|
||||||
|
ServiceAccountEmail string
|
||||||
|
|
||||||
|
// CreatedTime is the creation time of the HMAC key.
|
||||||
|
CreatedTime time.Time
|
||||||
|
|
||||||
|
// UpdatedTime is the last modification time of the HMAC key metadata.
|
||||||
|
UpdatedTime time.Time
|
||||||
|
|
||||||
|
// State is the state of the HMAC key.
|
||||||
|
// It can be one of StateActive, StateInactive or StateDeleted.
|
||||||
|
State HMACState
|
||||||
|
}
|
||||||
|
|
||||||
|
// HMACKeyHandle helps provide access and management for HMAC keys.
|
||||||
|
//
|
||||||
|
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
type HMACKeyHandle struct {
|
||||||
|
projectID string
|
||||||
|
accessID string
|
||||||
|
|
||||||
|
raw *raw.ProjectsHmacKeysService
|
||||||
|
}
|
||||||
|
|
||||||
|
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
||||||
|
return &HMACKeyHandle{
|
||||||
|
projectID: projectID,
|
||||||
|
accessID: accessID,
|
||||||
|
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get invokes an RPC to retrieve the HMAC key referenced by the
|
||||||
|
// HMACKeyHandle's accessID.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) {
|
||||||
|
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
|
||||||
|
var metadata *raw.HmacKeyMetadata
|
||||||
|
var err error
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
metadata, err = call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
hkPb := &raw.HmacKey{
|
||||||
|
Metadata: metadata,
|
||||||
|
}
|
||||||
|
return pbHmacKeyToHMACKey(hkPb, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
||||||
|
// Only inactive HMAC keys can be deleted.
|
||||||
|
// After deletion, a key cannot be used to authenticate requests.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (hkh *HMACKeyHandle) Delete(ctx context.Context) error {
|
||||||
|
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
|
||||||
|
setClientHeader(delCall.Header())
|
||||||
|
|
||||||
|
return runWithRetry(ctx, func() error {
|
||||||
|
return delCall.Context(ctx).Do()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||||
|
pbmd := pb.Metadata
|
||||||
|
if pbmd == nil {
|
||||||
|
return nil, errors.New("field Metadata cannot be nil")
|
||||||
|
}
|
||||||
|
createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("field CreatedTime: %v", err)
|
||||||
|
}
|
||||||
|
updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated)
|
||||||
|
if err != nil && !updatedTimeCanBeNil {
|
||||||
|
return nil, fmt.Errorf("field UpdatedTime: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hmk := &HMACKey{
|
||||||
|
AccessID: pbmd.AccessId,
|
||||||
|
Secret: pb.Secret,
|
||||||
|
Etag: pbmd.Etag,
|
||||||
|
ID: pbmd.Id,
|
||||||
|
State: HMACState(pbmd.State),
|
||||||
|
ProjectID: pbmd.ProjectId,
|
||||||
|
CreatedTime: createdTime,
|
||||||
|
UpdatedTime: updatedTime,
|
||||||
|
|
||||||
|
ServiceAccountEmail: pbmd.ServiceAccountEmail,
|
||||||
|
}
|
||||||
|
|
||||||
|
return hmk, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string) (*HMACKey, error) {
|
||||||
|
if projectID == "" {
|
||||||
|
return nil, errors.New("storage: expecting a non-blank projectID")
|
||||||
|
}
|
||||||
|
if serviceAccountEmail == "" {
|
||||||
|
return nil, errors.New("storage: expecting a non-blank service account email")
|
||||||
|
}
|
||||||
|
|
||||||
|
svc := raw.NewProjectsHmacKeysService(c.raw)
|
||||||
|
call := svc.Create(projectID, serviceAccountEmail)
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
|
||||||
|
var hkPb *raw.HmacKey
|
||||||
|
var err error
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
hkPb, err = call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pbHmacKeyToHMACKey(hkPb, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
|
||||||
|
//
|
||||||
|
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
type HMACKeyAttrsToUpdate struct {
|
||||||
|
// State is required and must be either StateActive or StateInactive.
|
||||||
|
State HMACState
|
||||||
|
|
||||||
|
// Etag is an optional field and it is the HTTP/1.1 Entity tag.
|
||||||
|
Etag string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update mutates the HMACKey referred to by accessID.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*HMACKey, error) {
|
||||||
|
if au.State != Active && au.State != Inactive {
|
||||||
|
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
||||||
|
}
|
||||||
|
|
||||||
|
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
|
||||||
|
Etag: au.Etag,
|
||||||
|
State: string(au.State),
|
||||||
|
})
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
|
||||||
|
var metadata *raw.HmacKeyMetadata
|
||||||
|
var err error
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
metadata, err = call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hkPb := &raw.HmacKey{
|
||||||
|
Metadata: metadata,
|
||||||
|
}
|
||||||
|
return pbHmacKeyToHMACKey(hkPb, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// An HMACKeysIterator is an iterator over HMACKeys.
|
||||||
|
//
|
||||||
|
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
type HMACKeysIterator struct {
|
||||||
|
ctx context.Context
|
||||||
|
raw *raw.ProjectsHmacKeysService
|
||||||
|
projectID string
|
||||||
|
hmacKeys []*HMACKey
|
||||||
|
pageInfo *iterator.PageInfo
|
||||||
|
nextFunc func() error
|
||||||
|
index int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListHMACKeys returns an iterator for listing HMACKeys.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIterator {
|
||||||
|
it := &HMACKeysIterator{
|
||||||
|
ctx: ctx,
|
||||||
|
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||||
|
projectID: projectID,
|
||||||
|
}
|
||||||
|
|
||||||
|
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||||
|
it.fetch,
|
||||||
|
func() int { return len(it.hmacKeys) - it.index },
|
||||||
|
func() interface{} {
|
||||||
|
prev := it.hmacKeys
|
||||||
|
it.hmacKeys = it.hmacKeys[:0]
|
||||||
|
it.index = 0
|
||||||
|
return prev
|
||||||
|
})
|
||||||
|
return it
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns the next result. Its second return value is iterator.Done if
|
||||||
|
// there are no more results. Once Next returns iterator.Done, all subsequent
|
||||||
|
// calls will return iterator.Done.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (it *HMACKeysIterator) Next() (*HMACKey, error) {
|
||||||
|
if err := it.nextFunc(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key := it.hmacKeys[it.index]
|
||||||
|
it.index++
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||||
|
//
|
||||||
|
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||||
|
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||||
|
|
||||||
|
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||||
|
call := it.raw.List(it.projectID)
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
call = call.PageToken(pageToken)
|
||||||
|
// By default we'll also show deleted keys and then
|
||||||
|
// let users filter on their own.
|
||||||
|
call = call.ShowDeletedKeys(true)
|
||||||
|
if pageSize > 0 {
|
||||||
|
call = call.MaxResults(int64(pageSize))
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := it.ctx
|
||||||
|
var resp *raw.HmacKeysMetadata
|
||||||
|
err = runWithRetry(it.ctx, func() error {
|
||||||
|
resp, err = call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, metadata := range resp.Items {
|
||||||
|
hkPb := &raw.HmacKey{
|
||||||
|
Metadata: metadata,
|
||||||
|
}
|
||||||
|
hkey, err := pbHmacKeyToHMACKey(hkPb, true)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
it.hmacKeys = append(it.hmacKeys, hkey)
|
||||||
|
}
|
||||||
|
return resp.NextPageToken, nil
|
||||||
|
}
|
|
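A sketch of the expected key lifecycle using the handles above; the project ID, service account email, and client are placeholders:

```go
ctx := context.Background()

// Create a key for a service account.
key, err := client.CreateHMACKey(ctx, "my-project", "sa@my-project.iam.gserviceaccount.com")
if err != nil {
	log.Fatal(err)
}

// Deactivate it, then delete it (only inactive keys can be deleted).
h := client.HMACKeyHandle("my-project", key.AccessID)
if _, err := h.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
	log.Fatal(err)
}
if err := h.Delete(ctx); err != nil {
	log.Fatal(err)
}
```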
@ -0,0 +1,130 @@
|
||||||
|
// Copyright 2017 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"cloud.google.com/go/iam"
|
||||||
|
"cloud.google.com/go/internal/trace"
|
||||||
|
raw "google.golang.org/api/storage/v1"
|
||||||
|
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IAM provides access to IAM access control for the bucket.
|
||||||
|
func (b *BucketHandle) IAM() *iam.Handle {
|
||||||
|
return iam.InternalNewHandleClient(&iamClient{
|
||||||
|
raw: b.c.raw,
|
||||||
|
userProject: b.userProject,
|
||||||
|
}, b.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// iamClient implements the iam.client interface.
|
||||||
|
type iamClient struct {
|
||||||
|
raw *raw.Service
|
||||||
|
userProject string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
call := c.raw.Buckets.GetIamPolicy(resource)
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
if c.userProject != "" {
|
||||||
|
call.UserProject(c.userProject)
|
||||||
|
}
|
||||||
|
var rp *raw.Policy
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
rp, err = call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return iamFromStoragePolicy(rp), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
rp := iamToStoragePolicy(p)
|
||||||
|
call := c.raw.Buckets.SetIamPolicy(resource, rp)
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
if c.userProject != "" {
|
||||||
|
call.UserProject(c.userProject)
|
||||||
|
}
|
||||||
|
return runWithRetry(ctx, func() error {
|
||||||
|
_, err := call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
|
||||||
|
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
|
||||||
|
defer func() { trace.EndSpan(ctx, err) }()
|
||||||
|
|
||||||
|
call := c.raw.Buckets.TestIamPermissions(resource, perms)
|
||||||
|
setClientHeader(call.Header())
|
||||||
|
if c.userProject != "" {
|
||||||
|
call.UserProject(c.userProject)
|
||||||
|
}
|
||||||
|
var res *raw.TestIamPermissionsResponse
|
||||||
|
err = runWithRetry(ctx, func() error {
|
||||||
|
res, err = call.Context(ctx).Do()
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return res.Permissions, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
|
||||||
|
return &raw.Policy{
|
||||||
|
Bindings: iamToStorageBindings(ip.Bindings),
|
||||||
|
Etag: string(ip.Etag),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
|
||||||
|
var rbs []*raw.PolicyBindings
|
||||||
|
for _, ib := range ibs {
|
||||||
|
rbs = append(rbs, &raw.PolicyBindings{
|
||||||
|
Role: ib.Role,
|
||||||
|
Members: ib.Members,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return rbs
|
||||||
|
}
|
||||||
|
|
||||||
|
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
|
||||||
|
return &iampb.Policy{
|
||||||
|
Bindings: iamFromStorageBindings(rp.Bindings),
|
||||||
|
Etag: []byte(rp.Etag),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
|
||||||
|
var ibs []*iampb.Binding
|
||||||
|
for _, rb := range rbs {
|
||||||
|
ibs = append(ibs, &iampb.Binding{
|
||||||
|
Role: rb.Role,
|
||||||
|
Members: rb.Members,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ibs
|
||||||
|
}
|
|
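A sketch of how a caller reaches this code through the bucket's IAM handle; the role and member are placeholders, and the `Policy`, `Add`, and `SetPolicy` methods are assumed to come from the `cloud.google.com/go/iam` package imported above:

```go
ctx := context.Background()
handle := client.Bucket("bucket").IAM()

policy, err := handle.Policy(ctx)
if err != nil {
	log.Fatal(err)
}
// Grant a role to a member and write the policy back.
policy.Add("group:admins@example.com", "roles/storage.objectViewer")
if err := handle.SetPolicy(ctx, policy); err != nil {
	log.Fatal(err)
}
```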
@@ -0,0 +1,37 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go/v2"
)

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		if shouldRetry(err) {
			return false, nil
		}
		return true, err
	})
}
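runWithRetry is unexported and leans on the module-internal cloud.google.com/go/internal package, so the standalone sketch below re-implements the same stop/retry contract directly on top of gax.Backoff. It illustrates the pattern only; it is not the library's implementation.

package main

import (
	"context"
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

// retry mirrors the contract used above: the callback reports whether to stop
// and the final error; between attempts we sleep for an exponentially growing
// pause, or give up when the context is done.
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	for {
		stop, err := f()
		if stop {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(bo.Pause()):
		}
	}
}

func main() {
	attempts := 0
	err := retry(context.Background(), gax.Backoff{Initial: 100 * time.Millisecond}, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // simulated retryable failure: ask for another attempt
		}
		return true, nil // success on the third attempt
	})
	fmt.Println("attempts:", attempts, "err:", err)
}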
@@ -0,0 +1,42 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.10

package storage

import (
	"net/url"
	"strings"

	"google.golang.org/api/googleapi"
)

func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry on REFUSED_STREAM.
		// Unfortunately the error type is unexported, so we resort to string
		// matching.
		return strings.Contains(e.Error(), "REFUSED_STREAM")
	case interface{ Temporary() bool }:
		return e.Temporary()
	default:
		return false
	}
}
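To make the classification concrete, here is a tiny self-contained sketch; since shouldRetry is unexported, isRetryable below is a local stand-in reduced to the googleapi.Error case, not the library's function.

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// isRetryable is a local stand-in for the unexported shouldRetry above,
// restricted to the googleapi.Error case for illustration.
func isRetryable(err error) bool {
	if e, ok := err.(*googleapi.Error); ok {
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	}
	return false
}

func main() {
	fmt.Println(isRetryable(&googleapi.Error{Code: 503})) // true: transient server error
	fmt.Println(isRetryable(&googleapi.Error{Code: 404})) // false: not found is terminal
}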
@@ -0,0 +1,188 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"errors"
	"fmt"
	"regexp"

	"cloud.google.com/go/internal/trace"
	raw "google.golang.org/api/storage/v1"
)

// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
	// The ID of the notification.
	ID string

	// The ID of the topic to which this subscription publishes.
	TopicID string

	// The ID of the project to which the topic belongs.
	TopicProjectID string

	// Only send notifications about listed event types. If empty, send notifications
	// for all event types.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
	EventTypes []string

	// If present, only apply this notification configuration to object names that
	// begin with this prefix.
	ObjectNamePrefix string

	// An optional list of additional attributes to attach to each Cloud PubSub
	// message published for this notification subscription.
	CustomAttributes map[string]string

	// The contents of the message payload.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
	PayloadFormat string
}

// Values for Notification.PayloadFormat.
const (
	// Send no payload with notification messages.
	NoPayload = "NONE"

	// Send object metadata as JSON with notification messages.
	JSONPayload = "JSON_API_V1"
)

// Values for Notification.EventTypes.
const (
	// Event that occurs when an object is successfully created.
	ObjectFinalizeEvent = "OBJECT_FINALIZE"

	// Event that occurs when the metadata of an existing object changes.
	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"

	// Event that occurs when an object is permanently deleted.
	ObjectDeleteEvent = "OBJECT_DELETE"

	// Event that occurs when the live version of an object becomes an
	// archived version.
	ObjectArchiveEvent = "OBJECT_ARCHIVE"
)

func toNotification(rn *raw.Notification) *Notification {
	n := &Notification{
		ID:               rn.Id,
		EventTypes:       rn.EventTypes,
		ObjectNamePrefix: rn.ObjectNamePrefix,
		CustomAttributes: rn.CustomAttributes,
		PayloadFormat:    rn.PayloadFormat,
	}
	n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
	return n
}

var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")

// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
	matches := topicRE.FindStringSubmatch(nt)
	if matches == nil {
		return "?", "?"
	}
	return matches[1], matches[2]
}

func toRawNotification(n *Notification) *raw.Notification {
	return &raw.Notification{
		Id: n.ID,
		Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
			n.TopicProjectID, n.TopicID),
		EventTypes:       n.EventTypes,
		ObjectNamePrefix: n.ObjectNamePrefix,
		CustomAttributes: n.CustomAttributes,
		PayloadFormat:    string(n.PayloadFormat),
	}
}

// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
	defer func() { trace.EndSpan(ctx, err) }()

	if n.ID != "" {
		return nil, errors.New("storage: AddNotification: ID must not be set")
	}
	if n.TopicProjectID == "" {
		return nil, errors.New("storage: AddNotification: missing TopicProjectID")
	}
	if n.TopicID == "" {
		return nil, errors.New("storage: AddNotification: missing TopicID")
	}
	call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	rn, err := call.Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return toNotification(rn), nil
}

// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
	defer func() { trace.EndSpan(ctx, err) }()

	call := b.c.raw.Notifications.List(b.name)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	var res *raw.Notifications
	err = runWithRetry(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return notificationsToMap(res.Items), nil
}

func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
	m := map[string]*Notification{}
	for _, rn := range rns {
		m[rn.Id] = toNotification(rn)
	}
	return m
}

// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
	defer func() { trace.EndSpan(ctx, err) }()

	call := b.c.raw.Notifications.Delete(b.name, id)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	return call.Context(ctx).Do()
}
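A usage sketch for the notification API above: AddNotification requires TopicProjectID, TopicID and PayloadFormat and leaves ID unset. Bucket, project and topic names below are placeholders for resources you own, and the client construction assumes the storage.NewClient entry point defined elsewhere in this package.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder resource names.
	bkt := client.Bucket("my-example-bucket")
	n, err := bkt.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-example-project",
		TopicID:        "my-example-topic",
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created notification", n.ID)

	// Clean up by ID; Notifications(ctx) would list all configurations.
	if err := bkt.DeleteNotification(ctx, n.ID); err != nil {
		log.Fatal(err)
	}
}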
@@ -0,0 +1,385 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"time"

	"cloud.google.com/go/internal/trace"
	"google.golang.org/api/googleapi"
)

var crc32cTable = crc32.MakeTable(crc32.Castagnoli)

// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
	// Size is the length of the object's content.
	Size int64

	// ContentType is the MIME type of the object's content.
	ContentType string

	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string

	// CacheControl specifies whether and for how long browser and Internet
	// caches are allowed to cache your objects.
	CacheControl string

	// LastModified is the time that the object was last modified.
	LastModified time.Time

	// Generation is the generation number of the object's content.
	Generation int64

	// Metageneration is the version of the metadata for this object at
	// this generation. This field is used for preconditions and for
	// detecting changes in metadata. A metageneration number is only
	// meaningful in the context of a particular generation of a
	// particular object.
	Metageneration int64
}

// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
	return o.NewRangeReader(ctx, 0, -1)
}

// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := o.validate(); err != nil {
		return nil, err
	}
	if offset < 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
	}
	if o.conds != nil {
		if err := o.conds.validate("NewRangeReader"); err != nil {
			return nil, err
		}
	}
	u := &url.URL{
		Scheme: o.c.scheme,
		Host:   o.c.readHost,
		Path:   fmt.Sprintf("/%s/%s", o.bucket, o.object),
	}
	verb := "GET"
	if length == 0 {
		verb = "HEAD"
	}
	req, err := http.NewRequest(verb, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	if o.userProject != "" {
		req.Header.Set("X-Goog-User-Project", o.userProject)
	}
	if o.readCompressed {
		req.Header.Set("Accept-Encoding", "gzip")
	}
	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
		return nil, err
	}

	gen := o.gen

	// Define a function that initiates a Read with offset and length, assuming we
	// have already read seen bytes.
	reopen := func(seen int64) (*http.Response, error) {
		start := offset + seen
		if length < 0 && start > 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
		} else if length > 0 {
			// The end character isn't affected by how many bytes we've seen.
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
		}
		// We wait to assign conditions here because the generation number can change in between reopen() runs.
		req.URL.RawQuery = conditionsQuery(gen, o.conds)
		var res *http.Response
		err = runWithRetry(ctx, func() error {
			res, err = o.c.hc.Do(req)
			if err != nil {
				return err
			}
			if res.StatusCode == http.StatusNotFound {
				res.Body.Close()
				return ErrObjectNotExist
			}
			if res.StatusCode < 200 || res.StatusCode > 299 {
				body, _ := ioutil.ReadAll(res.Body)
				res.Body.Close()
				return &googleapi.Error{
					Code:   res.StatusCode,
					Header: res.Header,
					Body:   string(body),
				}
			}
			if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
				res.Body.Close()
				return errors.New("storage: partial request not satisfied")
			}
			// If a generation hasn't been specified, and this is the first response we get, let's record the
			// generation. In future requests we'll use this generation as a precondition to avoid data races.
			if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
				gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
				if err != nil {
					return err
				}
				gen = gen64
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
		return res, nil
	}

	res, err := reopen(0)
	if err != nil {
		return nil, err
	}
	var (
		size     int64 // total size of object, even if a range was requested.
		checkCRC bool
		crc      uint32
	)
	if res.StatusCode == http.StatusPartialContent {
		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
	} else {
		size = res.ContentLength
		// Check the CRC iff all of the following hold:
		// - We asked for content (length != 0).
		// - We got all the content (status != PartialContent).
		// - The server sent a CRC header.
		// - The Go http stack did not uncompress the file.
		// - We were not served compressed data that was uncompressed on download.
		// The problem with the last two cases is that the CRC will not match -- GCS
		// computes it on the compressed contents, but we compute it on the
		// uncompressed contents.
		if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
			crc, checkCRC = parseCRC32c(res)
		}
	}

	remain := res.ContentLength
	body := res.Body
	if length == 0 {
		remain = 0
		body.Close()
		body = emptyBody
	}
	var metaGen int64
	if res.Header.Get("X-Goog-Generation") != "" {
		metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
		if err != nil {
			return nil, err
		}
	}

	var lm time.Time
	if res.Header.Get("Last-Modified") != "" {
		lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
		if err != nil {
			return nil, err
		}
	}

	attrs := ReaderObjectAttrs{
		Size:            size,
		ContentType:     res.Header.Get("Content-Type"),
		ContentEncoding: res.Header.Get("Content-Encoding"),
		CacheControl:    res.Header.Get("Cache-Control"),
		LastModified:    lm,
		Generation:      gen,
		Metageneration:  metaGen,
	}
	return &Reader{
		Attrs:    attrs,
		body:     body,
		size:     size,
		remain:   remain,
		wantCRC:  crc,
		checkCRC: checkCRC,
		reopen:   reopen,
	}, nil
}

func uncompressedByServer(res *http.Response) bool {
	// If the data is stored as gzip but is not encoded as gzip, then it
	// was uncompressed by the server.
	return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
		res.Header.Get("Content-Encoding") != "gzip"
}

func parseCRC32c(res *http.Response) (uint32, bool) {
	const prefix = "crc32c="
	for _, spec := range res.Header["X-Goog-Hash"] {
		if strings.HasPrefix(spec, prefix) {
			c, err := decodeUint32(spec[len(prefix):])
			if err == nil {
				return c, true
			}
		}
	}
	return 0, false
}

var emptyBody = ioutil.NopCloser(strings.NewReader(""))

// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
	Attrs              ReaderObjectAttrs
	body               io.ReadCloser
	seen, remain, size int64
	checkCRC           bool   // should we check the CRC?
	wantCRC            uint32 // the CRC32c value the server sent in the header
	gotCRC             uint32 // running crc
	reopen             func(seen int64) (*http.Response, error)
}

// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
	return r.body.Close()
}

func (r *Reader) Read(p []byte) (int, error) {
	n, err := r.readWithRetry(p)
	if r.remain != -1 {
		r.remain -= int64(n)
	}
	if r.checkCRC {
		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
		// Check CRC here. It would be natural to check it in Close, but
		// everybody defers Close on the assumption that it doesn't return
		// anything worth looking at.
		if err == io.EOF {
			if r.gotCRC != r.wantCRC {
				return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
					r.gotCRC, r.wantCRC)
			}
		}
	}
	return n, err
}

func (r *Reader) readWithRetry(p []byte) (int, error) {
	n := 0
	for len(p[n:]) > 0 {
		m, err := r.body.Read(p[n:])
		n += m
		r.seen += int64(m)
		if !shouldRetryRead(err) {
			return n, err
		}
		// Read failed, but we will try again. Send a ranged read request that takes
		// into account the number of bytes we've already seen.
		res, err := r.reopen(r.seen)
		if err != nil {
			// reopen already retries
			return n, err
		}
		r.body.Close()
		r.body = res.Body
	}
	return n, nil
}

func shouldRetryRead(err error) bool {
	if err == nil {
		return false
	}
	return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}

// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 {
	return r.Attrs.Size
}

// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
	return r.remain
}

// ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string {
	return r.Attrs.ContentType
}

// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
	return r.Attrs.ContentEncoding
}

// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
	return r.Attrs.CacheControl
}

// LastModified returns the value of the Last-Modified header.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
	return r.Attrs.LastModified, nil
}
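A short usage sketch for NewRangeReader above: read a byte range from an object and inspect the attributes populated on the Reader. Bucket and object names are placeholders, and the client/ObjectHandle plumbing comes from elsewhere in this package.

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder bucket and object names.
	obj := client.Bucket("my-example-bucket").Object("my-object.txt")

	// Read 64 bytes starting at offset 16; a negative length reads to the end.
	r, err := obj.NewRangeReader(ctx, 16, 64)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes; total object size %d\n", len(data), r.Attrs.Size)
}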
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -0,0 +1,265 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"sync"
	"unicode/utf8"

	"google.golang.org/api/googleapi"
	raw "google.golang.org/api/storage/v1"
)

// A Writer writes a Cloud Storage object.
type Writer struct {
	// ObjectAttrs are optional attributes to set on the object. Any attributes
	// must be initialized before the first Write call. Nil or zero-valued
	// attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Writer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data written does not match the checksum,
	// the write will be rejected.
	SendCRC32C bool

	// ChunkSize controls the maximum number of bytes of the object that the
	// Writer will attempt to send to the server in a single request. Objects
	// smaller than the size will be sent in a single request, while larger
	// objects will be split over multiple requests. The size will be rounded up
	// to the nearest multiple of 256K. If zero, chunking will be disabled and
	// the object will be uploaded in a single request.
	//
	// ChunkSize will default to a reasonable value. If you perform many concurrent
	// writes of small objects, you may wish to set ChunkSize to a value that matches
	// your objects' sizes to avoid consuming large amounts of memory.
	//
	// ChunkSize must be set before the first Write call.
	ChunkSize int

	// ProgressFunc can be used to monitor the progress of a large write
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)

	ctx context.Context
	o   *ObjectHandle

	opened bool
	pw     *io.PipeWriter

	donec chan struct{} // closed after err and obj are set.
	obj   *ObjectAttrs

	mu  sync.Mutex
	err error
}

func (w *Writer) open() error {
	attrs := w.ObjectAttrs
	// Check the developer didn't change the object Name (this is unfortunate, but
	// we don't want to store an object under the wrong name).
	if attrs.Name != w.o.object {
		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
	}
	if !utf8.ValidString(attrs.Name) {
		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
	}
	if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
		return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
	}
	pr, pw := io.Pipe()
	w.pw = pw
	w.opened = true

	go w.monitorCancel()

	if w.ChunkSize < 0 {
		return errors.New("storage: Writer.ChunkSize must be non-negative")
	}
	mediaOpts := []googleapi.MediaOption{
		googleapi.ChunkSize(w.ChunkSize),
	}
	if c := attrs.ContentType; c != "" {
		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
	}

	go func() {
		defer close(w.donec)

		rawObj := attrs.toRawObject(w.o.bucket)
		if w.SendCRC32C {
			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
		}
		if w.MD5 != nil {
			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
		}
		if w.o.c.envHost != "" {
			w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost)
		}
		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
			Media(pr, mediaOpts...).
			Projection("full").
			Context(w.ctx)

		if w.ProgressFunc != nil {
			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
		}
		if attrs.KMSKeyName != "" {
			call.KmsKeyName(attrs.KMSKeyName)
		}
		if attrs.PredefinedACL != "" {
			call.PredefinedAcl(attrs.PredefinedACL)
		}
		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		var resp *raw.Object
		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
		if err == nil {
			if w.o.userProject != "" {
				call.UserProject(w.o.userProject)
			}
			setClientHeader(call.Header())
			// If the chunk size is zero, then no chunking is done on the Reader,
			// which means we cannot retry: the first call will read the data, and if
			// it fails, there is no way to re-read.
			if w.ChunkSize == 0 {
				resp, err = call.Do()
			} else {
				// We will only retry here if the initial POST, which obtains a URI for
				// the resumable upload, fails with a retryable error. The upload itself
				// has its own retry logic.
				err = runWithRetry(w.ctx, func() error {
					var err2 error
					resp, err2 = call.Do()
					return err2
				})
			}
		}
		if err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		w.obj = newObject(resp)
	}()
	return nil
}

// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
	w.mu.Lock()
	werr := w.err
	w.mu.Unlock()
	if werr != nil {
		return 0, werr
	}
	if !w.opened {
		if err := w.open(); err != nil {
			return 0, err
		}
	}
	n, err = w.pw.Write(p)
	if err != nil {
		w.mu.Lock()
		werr := w.err
		w.mu.Unlock()
		// Preserve existing functionality that when context is canceled, Write will return
		// context.Canceled instead of "io: read/write on closed pipe". This hides the
		// pipe implementation detail from users and makes Write seem as though it's an RPC.
		if werr == context.Canceled || werr == context.DeadlineExceeded {
			return n, werr
		}
	}
	return n, err
}

// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
	if !w.opened {
		if err := w.open(); err != nil {
			return err
		}
	}

	// Closing either the read or write causes the entire pipe to close.
	if err := w.pw.Close(); err != nil {
		return err
	}

	<-w.donec
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.err
}

// monitorCancel is intended to be used as a background goroutine. It monitors the
// context, and when it observes that the context has been canceled, it manually
// closes things that do not take a context.
func (w *Writer) monitorCancel() {
	select {
	case <-w.ctx.Done():
		w.mu.Lock()
		werr := w.ctx.Err()
		w.err = werr
		w.mu.Unlock()

		// Closing either the read or write causes the entire pipe to close.
		w.CloseWithError(werr)
	case <-w.donec:
	}
}

// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error {
	if !w.opened {
		return nil
	}
	return w.pw.CloseWithError(err)
}

// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
func (w *Writer) Attrs() *ObjectAttrs {
	return w.obj
}
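Because the upload runs asynchronously, errors surface from Close rather than Write, as the doc comments above note. A minimal usage sketch; bucket and object names are placeholders, and ObjectHandle.NewWriter is assumed from the suppressed portion of this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder bucket and object names.
	w := client.Bucket("my-example-bucket").Object("greeting.txt").NewWriter(ctx)
	w.ContentType = "text/plain" // set via the embedded ObjectAttrs, before the first Write
	w.ChunkSize = 256 * 1024     // small object: one 256K chunk keeps memory use low

	if _, err := fmt.Fprintln(w, "hello, world"); err != nil {
		log.Fatal(err)
	}
	// Close reports whether the upload actually succeeded.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote object, generation", w.Attrs().Generation)
}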
2812 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored, normal file)
File diff suppressed because it is too large
872 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto (generated, vendored, normal file)
@@ -0,0 +1,872 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Author: kenton@google.com (Kenton Varda)
//  Based on original Protocol Buffers design by
//  Sanjay Ghemawat, Jeff Dean, and others.
//
// The messages in this file describe the definitions found in .proto files.
// A valid .proto file can be translated directly to a FileDescriptorProto
// without any other information (e.g. without reading its imports).

syntax = "proto2";

package google.protobuf;
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
option csharp_namespace = "Google.Protobuf.Reflection";
option objc_class_prefix = "GPB";
option cc_enable_arenas = true;

// descriptor.proto must be optimized for speed because reflection-based
// algorithms don't work during bootstrapping.
option optimize_for = SPEED;

// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
message FileDescriptorSet {
  repeated FileDescriptorProto file = 1;
}

// Describes a complete .proto file.
message FileDescriptorProto {
  optional string name = 1;    // file name, relative to root of source tree
  optional string package = 2; // e.g. "foo", "foo.bar", etc.

  // Names of files imported by this file.
  repeated string dependency = 3;
  // Indexes of the public imported files in the dependency list above.
  repeated int32 public_dependency = 10;
  // Indexes of the weak imported files in the dependency list.
  // For Google-internal migration only. Do not use.
  repeated int32 weak_dependency = 11;

  // All top-level definitions in this file.
  repeated DescriptorProto message_type = 4;
  repeated EnumDescriptorProto enum_type = 5;
  repeated ServiceDescriptorProto service = 6;
  repeated FieldDescriptorProto extension = 7;

  optional FileOptions options = 8;

  // This field contains optional information about the original source code.
  // You may safely remove this entire field without harming runtime
  // functionality of the descriptors -- the information is needed only by
  // development tools.
  optional SourceCodeInfo source_code_info = 9;

  // The syntax of the proto file.
  // The supported values are "proto2" and "proto3".
  optional string syntax = 12;
}

// Describes a message type.
message DescriptorProto {
  optional string name = 1;

  repeated FieldDescriptorProto field = 2;
  repeated FieldDescriptorProto extension = 6;

  repeated DescriptorProto nested_type = 3;
  repeated EnumDescriptorProto enum_type = 4;

  message ExtensionRange {
    optional int32 start = 1;
    optional int32 end = 2;

    optional ExtensionRangeOptions options = 3;
  }
  repeated ExtensionRange extension_range = 5;

  repeated OneofDescriptorProto oneof_decl = 8;

  optional MessageOptions options = 7;

  // Range of reserved tag numbers. Reserved tag numbers may not be used by
  // fields or extension ranges in the same message. Reserved ranges may
  // not overlap.
  message ReservedRange {
    optional int32 start = 1; // Inclusive.
    optional int32 end = 2;   // Exclusive.
  }
  repeated ReservedRange reserved_range = 9;
  // Reserved field names, which may not be used by fields in the same message.
  // A given name may only be reserved once.
  repeated string reserved_name = 10;
}

message ExtensionRangeOptions {
  // The parser stores options it doesn't recognize here. See above.
  repeated UninterpretedOption uninterpreted_option = 999;

  // Clients can define custom options in extensions of this message. See above.
  extensions 1000 to max;
}

// Describes a field within a message.
message FieldDescriptorProto {
  enum Type {
    // 0 is reserved for errors.
    // Order is weird for historical reasons.
    TYPE_DOUBLE = 1;
    TYPE_FLOAT = 2;
    // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
    // negative values are likely.
    TYPE_INT64 = 3;
    TYPE_UINT64 = 4;
    // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
    // negative values are likely.
    TYPE_INT32 = 5;
    TYPE_FIXED64 = 6;
    TYPE_FIXED32 = 7;
    TYPE_BOOL = 8;
    TYPE_STRING = 9;
    // Tag-delimited aggregate.
    // Group type is deprecated and not supported in proto3. However, Proto3
    // implementations should still be able to parse the group wire format and
    // treat group fields as unknown fields.
    TYPE_GROUP = 10;
    TYPE_MESSAGE = 11; // Length-delimited aggregate.

    // New in version 2.
    TYPE_BYTES = 12;
    TYPE_UINT32 = 13;
    TYPE_ENUM = 14;
    TYPE_SFIXED32 = 15;
    TYPE_SFIXED64 = 16;
    TYPE_SINT32 = 17; // Uses ZigZag encoding.
    TYPE_SINT64 = 18; // Uses ZigZag encoding.
  };

  enum Label {
    // 0 is reserved for errors
    LABEL_OPTIONAL = 1;
    LABEL_REQUIRED = 2;
    LABEL_REPEATED = 3;
  };

  optional string name = 1;
  optional int32 number = 3;
  optional Label label = 4;

  // If type_name is set, this need not be set. If both this and type_name
  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
  optional Type type = 5;

  // For message and enum types, this is the name of the type. If the name
  // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
  // rules are used to find the type (i.e. first the nested types within this
  // message are searched, then within the parent, on up to the root
  // namespace).
  optional string type_name = 6;

  // For extensions, this is the name of the type being extended. It is
  // resolved in the same manner as type_name.
  optional string extendee = 2;

  // For numeric types, contains the original text representation of the value.
  // For booleans, "true" or "false".
  // For strings, contains the default text contents (not escaped in any way).
  // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
  // TODO(kenton): Base-64 encode?
  optional string default_value = 7;

  // If set, gives the index of a oneof in the containing type's oneof_decl
  // list. This field is a member of that oneof.
  optional int32 oneof_index = 9;

  // JSON name of this field. The value is set by protocol compiler. If the
  // user has set a "json_name" option on this field, that option's value
  // will be used. Otherwise, it's deduced from the field's name by converting
  // it to camelCase.
  optional string json_name = 10;

  optional FieldOptions options = 8;
}

// Describes a oneof.
message OneofDescriptorProto {
  optional string name = 1;
  optional OneofOptions options = 2;
}

// Describes an enum type.
message EnumDescriptorProto {
  optional string name = 1;

  repeated EnumValueDescriptorProto value = 2;

  optional EnumOptions options = 3;

  // Range of reserved numeric values. Reserved values may not be used by
  // entries in the same enum. Reserved ranges may not overlap.
  //
  // Note that this is distinct from DescriptorProto.ReservedRange in that it
  // is inclusive such that it can appropriately represent the entire int32
  // domain.
  message EnumReservedRange {
    optional int32 start = 1; // Inclusive.
    optional int32 end = 2;   // Inclusive.
  }

  // Range of reserved numeric values. Reserved numeric values may not be used
  // by enum values in the same enum declaration. Reserved ranges may not
  // overlap.
  repeated EnumReservedRange reserved_range = 4;

  // Reserved enum value names, which may not be reused. A given name may only
  // be reserved once.
  repeated string reserved_name = 5;
}

// Describes a value within an enum.
message EnumValueDescriptorProto {
  optional string name = 1;
  optional int32 number = 2;

  optional EnumValueOptions options = 3;
}

// Describes a service.
message ServiceDescriptorProto {
  optional string name = 1;
  repeated MethodDescriptorProto method = 2;

  optional ServiceOptions options = 3;
}

// Describes a method of a service.
message MethodDescriptorProto {
  optional string name = 1;

  // Input and output type names. These are resolved in the same way as
  // FieldDescriptorProto.type_name, but must refer to a message type.
  optional string input_type = 2;
  optional string output_type = 3;

  optional MethodOptions options = 4;

  // Identifies if client streams multiple client messages
  optional bool client_streaming = 5 [default=false];
  // Identifies if server streams multiple server messages
  optional bool server_streaming = 6 [default=false];
}


// ===================================================================
// Options

// Each of the definitions above may have "options" attached. These are
// just annotations which may cause code to be generated slightly differently
// or may contain hints for code that manipulates protocol messages.
//
// Clients may define custom options as extensions of the *Options messages.
// These extensions may not yet be known at parsing time, so the parser cannot
// store the values in them. Instead it stores them in a field in the *Options
// message called uninterpreted_option. This field must have the same name
// across all *Options messages. We then use this field to populate the
// extensions when we build a descriptor, at which point all protos have been
// parsed and so all extensions are known.
//
// Extension numbers for custom options may be chosen as follows:
// * For options which will only be used within a single application or
//   organization, or for experimental options, use field numbers 50000
//   through 99999. It is up to you to ensure that you do not use the
//   same number for multiple options.
// * For options which will be published and used publicly by multiple
//   independent entities, e-mail protobuf-global-extension-registry@google.com
//   to reserve extension numbers. Simply provide your project name (e.g.
//   Objective-C plugin) and your project website (if available) -- there's no
//   need to explain how you intend to use them. Usually you only need one
//   extension number. You can declare multiple options with only one extension
//   number by putting them in a sub-message. See the Custom Options section of
//   the docs for examples:
//   https://developers.google.com/protocol-buffers/docs/proto#options
//   If this turns out to be popular, a web service will be set up
//   to automatically assign option numbers.


message FileOptions {

  // Sets the Java package where classes generated from this .proto will be
  // placed. By default, the proto package is used, but this is often
  // inappropriate because proto packages do not normally start with backwards
  // domain names.
  optional string java_package = 1;


  // If set, all the classes from the .proto file are wrapped in a single
  // outer class with the given name. This applies to both Proto1
  // (equivalent to the old "--one_java_file" option) and Proto2 (where
  // a .proto always translates to a single class, but you may want to
  // explicitly choose the class name).
  optional string java_outer_classname = 8;

  // If set true, then the Java code generator will generate a separate .java
  // file for each top-level message, enum, and service defined in the .proto
  // file. Thus, these types will *not* be nested inside the outer class
  // named by java_outer_classname. However, the outer class will still be
  // generated to contain the file's getDescriptor() method as well as any
  // top-level extensions defined in the file.
  optional bool java_multiple_files = 10 [default=false];

  // This option does nothing.
  optional bool java_generate_equals_and_hash = 20 [deprecated=true];

  // If set true, then the Java2 code generator will generate code that
  // throws an exception whenever an attempt is made to assign a non-UTF-8
  // byte sequence to a string field.
  // Message reflection will do the same.
  // However, an extension field still accepts non-UTF-8 byte sequences.
  // This option has no effect when used with the lite runtime.
  optional bool java_string_check_utf8 = 27 [default=false];


  // Generated classes can be optimized for speed or code size.
  enum OptimizeMode {
    SPEED = 1;        // Generate complete code for parsing, serialization,
                      // etc.
    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
  }
  optional OptimizeMode optimize_for = 9 [default=SPEED];

  // Sets the Go package where structs generated from this .proto will be
  // placed. If omitted, the Go package will be derived from the following:
  //   - The basename of the package import path, if provided.
  //   - Otherwise, the package statement in the .proto file, if present.
  //   - Otherwise, the basename of the .proto file, without extension.
  optional string go_package = 11;



  // Should generic services be generated in each language? "Generic" services
  // are not specific to any particular RPC system. They are generated by the
  // main code generators in each language (without additional plugins).
  // Generic services were the only kind of service generation supported by
  // early versions of google.protobuf.
  //
  // Generic services are now considered deprecated in favor of using plugins
  // that generate code specific to your particular RPC system. Therefore,
  // these default to false. Old code which depends on generic services should
  // explicitly set them to true.
  optional bool cc_generic_services = 16 [default=false];
  optional bool java_generic_services = 17 [default=false];
  optional bool py_generic_services = 18 [default=false];
  optional bool php_generic_services = 42 [default=false];

  // Is this file deprecated?
  // Depending on the target platform, this can emit Deprecated annotations
  // for everything in the file, or it will be completely ignored; in the very
  // least, this is a formalization for deprecating files.
  optional bool deprecated = 23 [default=false];

  // Enables the use of arenas for the proto messages in this file. This applies
  // only to generated classes for C++.
  optional bool cc_enable_arenas = 31 [default=false];


  // Sets the objective c class prefix which is prepended to all objective c
  // generated classes from this .proto. There is no default.
  optional string objc_class_prefix = 36;

  // Namespace for generated classes; defaults to the package.
  optional string csharp_namespace = 37;

  // By default Swift generators will take the proto package and CamelCase it
  // replacing '.' with underscore and use that to prefix the types/symbols
  // defined. When this option is provided, they will use this value instead
  // to prefix the types/symbols defined.
  optional string swift_prefix = 39;

  // Sets the php class prefix which is prepended to all php generated classes
  // from this .proto. Default is empty.
  optional string php_class_prefix = 40;

  // Use this option to change the namespace of php generated classes. Default
  // is empty. When this option is empty, the package name will be used for
  // determining the namespace.
  optional string php_namespace = 41;

  // The parser stores options it doesn't recognize here.
  // See the documentation for the "Options" section above.
  repeated UninterpretedOption uninterpreted_option = 999;

  // Clients can define custom options in extensions of this message.
  // See the documentation for the "Options" section above.
  extensions 1000 to max;

  reserved 38;
}

message MessageOptions {
  // Set true to use the old proto1 MessageSet wire format for extensions.
  // This is provided for backwards-compatibility with the MessageSet wire
  // format. You should not use this for any other reason: It's less
  // efficient, has fewer features, and is more complicated.
  //
  // The message must be defined exactly as follows:
  //   message Foo {
  //     option message_set_wire_format = true;
  //     extensions 4 to max;
  //   }
  // Note that the message cannot have any defined fields; MessageSets only
  // have extensions.
  //
  // All extensions of your type must be singular messages; e.g. they cannot
  // be int32s, enums, or repeated messages.
  //
  // Because this is an option, the above two restrictions are not enforced by
  // the protocol compiler.
  optional bool message_set_wire_format = 1 [default=false];

  // Disables the generation of the standard "descriptor()" accessor, which can
  // conflict with a field of the same name. This is meant to make migration
  // from proto1 easier; new code should avoid fields named "descriptor".
  optional bool no_standard_descriptor_accessor = 2 [default=false];

  // Is this message deprecated?
  // Depending on the target platform, this can emit Deprecated annotations
  // for the message, or it will be completely ignored; in the very least,
  // this is a formalization for deprecating messages.
  optional bool deprecated = 3 [default=false];

  // Whether the message is an automatically generated map entry type for the
  // maps field.
  //
  // For maps fields:
  //     map<KeyType, ValueType> map_field = 1;
  // The parsed descriptor looks like:
  //     message MapFieldEntry {
  //         option map_entry = true;
  //         optional KeyType key = 1;
  //         optional ValueType value = 2;
  //     }
  //     repeated MapFieldEntry map_field = 1;
  //
  // Implementations may choose not to generate the map_entry=true message, but
  // use a native map in the target language to hold the keys and values.
  // The reflection APIs in such implementations still need to work as
  // if the field is a repeated message field.
  //
  // NOTE: Do not set the option in .proto files. Always use the maps syntax
  // instead. The option should only be implicitly set by the proto compiler
  // parser.
  optional bool map_entry = 7;

  reserved 8; // javalite_serializable
  reserved 9; // javanano_as_lite

  // The parser stores options it doesn't recognize here. See above.
  repeated UninterpretedOption uninterpreted_option = 999;

  // Clients can define custom options in extensions of this message. See above.
  extensions 1000 to max;
}

message FieldOptions {
  // The ctype option instructs the C++ code generator to use a different
|
||||||
|
// representation of the field than it normally would. See the specific
|
||||||
|
// options below. This option is not yet implemented in the open source
|
||||||
|
// release -- sorry, we'll try to include it in a future version!
|
||||||
|
optional CType ctype = 1 [default = STRING];
|
||||||
|
enum CType {
|
||||||
|
// Default mode.
|
||||||
|
STRING = 0;
|
||||||
|
|
||||||
|
CORD = 1;
|
||||||
|
|
||||||
|
STRING_PIECE = 2;
|
||||||
|
}
|
||||||
|
// The packed option can be enabled for repeated primitive fields to enable
|
||||||
|
// a more efficient representation on the wire. Rather than repeatedly
|
||||||
|
// writing the tag and type for each element, the entire array is encoded as
|
||||||
|
// a single length-delimited blob. In proto3, only explicitly setting it to
|
||||||
|
// false will avoid using packed encoding.
|
||||||
|
optional bool packed = 2;
|
||||||
|
|
||||||
|
// The jstype option determines the JavaScript type used for values of the
|
||||||
|
// field. The option is permitted only for 64 bit integral and fixed types
|
||||||
|
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
||||||
|
// is represented as JavaScript string, which avoids loss of precision that
|
||||||
|
// can happen when a large value is converted to a floating point JavaScript number.
|
||||||
|
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
||||||
|
// use the JavaScript "number" type. The behavior of the default option
|
||||||
|
// JS_NORMAL is implementation dependent.
|
||||||
|
//
|
||||||
|
// This option is an enum to permit additional types to be added, e.g.
|
||||||
|
// goog.math.Integer.
|
||||||
|
optional JSType jstype = 6 [default = JS_NORMAL];
|
||||||
|
enum JSType {
|
||||||
|
// Use the default type.
|
||||||
|
JS_NORMAL = 0;
|
||||||
|
|
||||||
|
// Use JavaScript strings.
|
||||||
|
JS_STRING = 1;
|
||||||
|
|
||||||
|
// Use JavaScript numbers.
|
||||||
|
JS_NUMBER = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should this field be parsed lazily? Lazy applies only to message-type
|
||||||
|
// fields. It means that when the outer message is initially parsed, the
|
||||||
|
// inner message's contents will not be parsed but instead stored in encoded
|
||||||
|
// form. The inner message will actually be parsed when it is first accessed.
|
||||||
|
//
|
||||||
|
// This is only a hint. Implementations are free to choose whether to use
|
||||||
|
// eager or lazy parsing regardless of the value of this option. However,
|
||||||
|
// setting this option true suggests that the protocol author believes that
|
||||||
|
// using lazy parsing on this field is worth the additional bookkeeping
|
||||||
|
// overhead typically needed to implement it.
|
||||||
|
//
|
||||||
|
// This option does not affect the public interface of any generated code;
|
||||||
|
// all method signatures remain the same. Furthermore, thread-safety of the
|
||||||
|
// interface is not affected by this option; const methods remain safe to
|
||||||
|
// call from multiple threads concurrently, while non-const methods continue
|
||||||
|
// to require exclusive access.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Note that implementations may choose not to check required fields within
|
||||||
|
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
||||||
|
// may return true even if the inner message has missing required fields.
|
||||||
|
// This is necessary because otherwise the inner message would have to be
|
||||||
|
// parsed in order to perform the check, defeating the purpose of lazy
|
||||||
|
// parsing. An implementation which chooses not to check required fields
|
||||||
|
// must be consistent about it. That is, for any particular sub-message, the
|
||||||
|
// implementation must either *always* check its required fields, or *never*
|
||||||
|
// check its required fields, regardless of whether or not the message has
|
||||||
|
// been parsed.
|
||||||
|
optional bool lazy = 5 [default=false];
|
||||||
|
|
||||||
|
// Is this field deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for accessors, or it will be completely ignored; in the very least, this
|
||||||
|
// is a formalization for deprecating fields.
|
||||||
|
optional bool deprecated = 3 [default=false];
|
||||||
|
|
||||||
|
// For Google-internal migration only. Do not use.
|
||||||
|
optional bool weak = 10 [default=false];
|
||||||
|
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
|
||||||
|
reserved 4; // removed jtype
|
||||||
|
}
|
||||||
|
|
||||||
|
message OneofOptions {
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EnumOptions {
|
||||||
|
|
||||||
|
// Set this option to true to allow mapping different tag names to the same
|
||||||
|
// value.
|
||||||
|
optional bool allow_alias = 2;
|
||||||
|
|
||||||
|
// Is this enum deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the enum, or it will be completely ignored; in the very least, this
|
||||||
|
// is a formalization for deprecating enums.
|
||||||
|
optional bool deprecated = 3 [default=false];
|
||||||
|
|
||||||
|
reserved 5; // javanano_as_lite
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EnumValueOptions {
|
||||||
|
// Is this enum value deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the enum value, or it will be completely ignored; in the very least,
|
||||||
|
// this is a formalization for deprecating enum values.
|
||||||
|
optional bool deprecated = 1 [default=false];
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ServiceOptions {
|
||||||
|
|
||||||
|
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||||
|
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||||
|
// we were already using them long before we decided to release Protocol
|
||||||
|
// Buffers.
|
||||||
|
|
||||||
|
// Is this service deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the service, or it will be completely ignored; in the very least,
|
||||||
|
// this is a formalization for deprecating services.
|
||||||
|
optional bool deprecated = 33 [default=false];
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
message MethodOptions {
|
||||||
|
|
||||||
|
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||||
|
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||||
|
// we were already using them long before we decided to release Protocol
|
||||||
|
// Buffers.
|
||||||
|
|
||||||
|
// Is this method deprecated?
|
||||||
|
// Depending on the target platform, this can emit Deprecated annotations
|
||||||
|
// for the method, or it will be completely ignored; in the very least,
|
||||||
|
// this is a formalization for deprecating methods.
|
||||||
|
optional bool deprecated = 33 [default=false];
|
||||||
|
|
||||||
|
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
||||||
|
// or neither? HTTP based RPC implementation may choose GET verb for safe
|
||||||
|
// methods, and PUT verb for idempotent methods instead of the default POST.
|
||||||
|
enum IdempotencyLevel {
|
||||||
|
IDEMPOTENCY_UNKNOWN = 0;
|
||||||
|
NO_SIDE_EFFECTS = 1; // implies idempotent
|
||||||
|
IDEMPOTENT = 2; // idempotent, but may have side effects
|
||||||
|
}
|
||||||
|
optional IdempotencyLevel idempotency_level =
|
||||||
|
34 [default=IDEMPOTENCY_UNKNOWN];
|
||||||
|
|
||||||
|
// The parser stores options it doesn't recognize here. See above.
|
||||||
|
repeated UninterpretedOption uninterpreted_option = 999;
|
||||||
|
|
||||||
|
// Clients can define custom options in extensions of this message. See above.
|
||||||
|
extensions 1000 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// A message representing an option the parser does not recognize. This only
|
||||||
|
// appears in options protos created by the compiler::Parser class.
|
||||||
|
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
||||||
|
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
||||||
|
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
||||||
|
// in them.
|
||||||
|
message UninterpretedOption {
|
||||||
|
// The name of the uninterpreted option. Each string represents a segment in
|
||||||
|
// a dot-separated name. is_extension is true iff a segment represents an
|
||||||
|
// extension (denoted with parentheses in options specs in .proto files).
|
||||||
|
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
||||||
|
// "foo.(bar.baz).qux".
|
||||||
|
message NamePart {
|
||||||
|
required string name_part = 1;
|
||||||
|
required bool is_extension = 2;
|
||||||
|
}
|
||||||
|
repeated NamePart name = 2;
|
||||||
|
|
||||||
|
// The value of the uninterpreted option, in whatever type the tokenizer
|
||||||
|
// identified it as during parsing. Exactly one of these should be set.
|
||||||
|
optional string identifier_value = 3;
|
||||||
|
optional uint64 positive_int_value = 4;
|
||||||
|
optional int64 negative_int_value = 5;
|
||||||
|
optional double double_value = 6;
|
||||||
|
optional bytes string_value = 7;
|
||||||
|
optional string aggregate_value = 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===================================================================
|
||||||
|
// Optional source code info
|
||||||
|
|
||||||
|
// Encapsulates information about the original source file from which a
|
||||||
|
// FileDescriptorProto was generated.
|
||||||
|
message SourceCodeInfo {
|
||||||
|
// A Location identifies a piece of source code in a .proto file which
|
||||||
|
// corresponds to a particular definition. This information is intended
|
||||||
|
// to be useful to IDEs, code indexers, documentation generators, and similar
|
||||||
|
// tools.
|
||||||
|
//
|
||||||
|
// For example, say we have a file like:
|
||||||
|
// message Foo {
|
||||||
|
// optional string foo = 1;
|
||||||
|
// }
|
||||||
|
// Let's look at just the field definition:
|
||||||
|
// optional string foo = 1;
|
||||||
|
// ^ ^^ ^^ ^ ^^^
|
||||||
|
// a bc de f ghi
|
||||||
|
// We have the following locations:
|
||||||
|
// span path represents
|
||||||
|
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
||||||
|
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
||||||
|
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
||||||
|
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
||||||
|
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
||||||
|
//
|
||||||
|
// Notes:
|
||||||
|
// - A location may refer to a repeated field itself (i.e. not to any
|
||||||
|
// particular index within it). This is used whenever a set of elements are
|
||||||
|
// logically enclosed in a single code segment. For example, an entire
|
||||||
|
// extend block (possibly containing multiple extension definitions) will
|
||||||
|
// have an outer location whose path refers to the "extensions" repeated
|
||||||
|
// field without an index.
|
||||||
|
// - Multiple locations may have the same path. This happens when a single
|
||||||
|
// logical declaration is spread out across multiple places. The most
|
||||||
|
// obvious example is the "extend" block again -- there may be multiple
|
||||||
|
// extend blocks in the same scope, each of which will have the same path.
|
||||||
|
// - A location's span is not always a subset of its parent's span. For
|
||||||
|
// example, the "extendee" of an extension declaration appears at the
|
||||||
|
// beginning of the "extend" block and is shared by all extensions within
|
||||||
|
// the block.
|
||||||
|
// - Just because a location's span is a subset of some other location's span
|
||||||
|
// does not mean that it is a descendent. For example, a "group" defines
|
||||||
|
// both a type and a field in a single declaration. Thus, the locations
|
||||||
|
// corresponding to the type and field and their components will overlap.
|
||||||
|
// - Code which tries to interpret locations should probably be designed to
|
||||||
|
// ignore those that it doesn't understand, as more types of locations could
|
||||||
|
// be recorded in the future.
|
||||||
|
repeated Location location = 1;
|
||||||
|
message Location {
|
||||||
|
// Identifies which part of the FileDescriptorProto was defined at this
|
||||||
|
// location.
|
||||||
|
//
|
||||||
|
// Each element is a field number or an index. They form a path from
|
||||||
|
// the root FileDescriptorProto to the place where the definition appears. For
|
||||||
|
// example, this path:
|
||||||
|
// [ 4, 3, 2, 7, 1 ]
|
||||||
|
// refers to:
|
||||||
|
// file.message_type(3) // 4, 3
|
||||||
|
// .field(7) // 2, 7
|
||||||
|
// .name() // 1
|
||||||
|
// This is because FileDescriptorProto.message_type has field number 4:
|
||||||
|
// repeated DescriptorProto message_type = 4;
|
||||||
|
// and DescriptorProto.field has field number 2:
|
||||||
|
// repeated FieldDescriptorProto field = 2;
|
||||||
|
// and FieldDescriptorProto.name has field number 1:
|
||||||
|
// optional string name = 1;
|
||||||
|
//
|
||||||
|
// Thus, the above path gives the location of a field name. If we removed
|
||||||
|
// the last element:
|
||||||
|
// [ 4, 3, 2, 7 ]
|
||||||
|
// this path refers to the whole field declaration (from the beginning
|
||||||
|
// of the label to the terminating semicolon).
|
||||||
|
repeated int32 path = 1 [packed=true];
|
||||||
|
|
||||||
|
// Always has exactly three or four elements: start line, start column,
|
||||||
|
// end line (optional, otherwise assumed same as start line), end column.
|
||||||
|
// These are packed into a single field for efficiency. Note that line
|
||||||
|
// and column numbers are zero-based -- typically you will want to add
|
||||||
|
// 1 to each before displaying to a user.
|
||||||
|
repeated int32 span = 2 [packed=true];
|
||||||
|
|
||||||
|
// If this SourceCodeInfo represents a complete declaration, these are any
|
||||||
|
// comments appearing before and after the declaration which appear to be
|
||||||
|
// attached to the declaration.
|
||||||
|
//
|
||||||
|
// A series of line comments appearing on consecutive lines, with no other
|
||||||
|
// tokens appearing on those lines, will be treated as a single comment.
|
||||||
|
//
|
||||||
|
// leading_detached_comments will keep paragraphs of comments that appear
|
||||||
|
// before (but not connected to) the current element. Each paragraph,
|
||||||
|
// separated by empty lines, will be one comment element in the repeated
|
||||||
|
// field.
|
||||||
|
//
|
||||||
|
// Only the comment content is provided; comment markers (e.g. //) are
|
||||||
|
// stripped out. For block comments, leading whitespace and an asterisk
|
||||||
|
// will be stripped from the beginning of each line other than the first.
|
||||||
|
// Newlines are included in the output.
|
||||||
|
//
|
||||||
|
// Examples:
|
||||||
|
//
|
||||||
|
// optional int32 foo = 1; // Comment attached to foo.
|
||||||
|
// // Comment attached to bar.
|
||||||
|
// optional int32 bar = 2;
|
||||||
|
//
|
||||||
|
// optional string baz = 3;
|
||||||
|
// // Comment attached to baz.
|
||||||
|
// // Another line attached to baz.
|
||||||
|
//
|
||||||
|
// // Comment attached to qux.
|
||||||
|
// //
|
||||||
|
// // Another line attached to qux.
|
||||||
|
// optional double qux = 4;
|
||||||
|
//
|
||||||
|
// // Detached comment for corge. This is not leading or trailing comments
|
||||||
|
// // to qux or corge because there are blank lines separating it from
|
||||||
|
// // both.
|
||||||
|
//
|
||||||
|
// // Detached comment for corge paragraph 2.
|
||||||
|
//
|
||||||
|
// optional string corge = 5;
|
||||||
|
// /* Block comment attached
|
||||||
|
// * to corge. Leading asterisks
|
||||||
|
// * will be removed. */
|
||||||
|
// /* Block comment attached to
|
||||||
|
// * grault. */
|
||||||
|
// optional int32 grault = 6;
|
||||||
|
//
|
||||||
|
// // ignored detached comments.
|
||||||
|
optional string leading_comments = 3;
|
||||||
|
optional string trailing_comments = 4;
|
||||||
|
repeated string leading_detached_comments = 6;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describes the relationship between generated code and its original source
|
||||||
|
// file. A GeneratedCodeInfo message is associated with only one generated
|
||||||
|
// source file, but may contain references to different source .proto files.
|
||||||
|
message GeneratedCodeInfo {
|
||||||
|
// An Annotation connects some span of text in generated code to an element
|
||||||
|
// of its generating .proto file.
|
||||||
|
repeated Annotation annotation = 1;
|
||||||
|
message Annotation {
|
||||||
|
// Identifies the element in the original source .proto file. This field
|
||||||
|
// is formatted the same as SourceCodeInfo.Location.path.
|
||||||
|
repeated int32 path = 1 [packed=true];
|
||||||
|
|
||||||
|
// Identifies the filesystem path to the original source .proto.
|
||||||
|
optional string source_file = 2;
|
||||||
|
|
||||||
|
// Identifies the starting offset in bytes in the generated code
|
||||||
|
// that relates to the identified object.
|
||||||
|
optional int32 begin = 3;
|
||||||
|
|
||||||
|
// Identifies the ending offset in bytes in the generated code that
|
||||||
|
// relates to the identified offset. The end offset should be one past
|
||||||
|
// the last relevant byte (so the length of the text = end - begin).
|
||||||
|
optional int32 end = 4;
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright 2016, Google Inc.
|
||||||
|
All rights reserved.
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,161 @@
|
||||||
|
// Copyright 2016, Google Inc.
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package gax
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CallOption is an option used by Invoke to control behaviors of RPC calls.
|
||||||
|
// CallOption works by modifying relevant fields of CallSettings.
|
||||||
|
type CallOption interface {
|
||||||
|
// Resolve applies the option by modifying cs.
|
||||||
|
Resolve(cs *CallSettings)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retryer is used by Invoke to determine retry behavior.
|
||||||
|
type Retryer interface {
|
||||||
|
// Retry reports whether a request should be retried and how long to pause before retrying
|
||||||
|
// if the previous attempt returned with err. Invoke never calls Retry with nil error.
|
||||||
|
Retry(err error) (pause time.Duration, shouldRetry bool)
|
||||||
|
}
|
||||||
|
|
||||||
|
type retryerOption func() Retryer
|
||||||
|
|
||||||
|
func (o retryerOption) Resolve(s *CallSettings) {
|
||||||
|
s.Retry = o
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRetry sets CallSettings.Retry to fn.
|
||||||
|
func WithRetry(fn func() Retryer) CallOption {
|
||||||
|
return retryerOption(fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnCodes returns a Retryer that retries if and only if
|
||||||
|
// the previous attempt returns a GRPC error whose error code is stored in cc.
|
||||||
|
// Pause times between retries are specified by bo.
|
||||||
|
//
|
||||||
|
// bo is only used for its parameters; each Retryer has its own copy.
|
||||||
|
func OnCodes(cc []codes.Code, bo Backoff) Retryer {
|
||||||
|
return &boRetryer{
|
||||||
|
backoff: bo,
|
||||||
|
codes: append([]codes.Code(nil), cc...),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type boRetryer struct {
|
||||||
|
backoff Backoff
|
||||||
|
codes []codes.Code
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *boRetryer) Retry(err error) (time.Duration, bool) {
|
||||||
|
st, ok := status.FromError(err)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
c := st.Code()
|
||||||
|
for _, rc := range r.codes {
|
||||||
|
if c == rc {
|
||||||
|
return r.backoff.Pause(), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backoff implements exponential backoff.
|
||||||
|
// The wait time between retries is a random value between 0 and the "retry envelope".
|
||||||
|
// The envelope starts at Initial and increases by the factor of Multiplier every retry,
|
||||||
|
// but is capped at Max.
|
||||||
|
type Backoff struct {
|
||||||
|
// Initial is the initial value of the retry envelope, defaults to 1 second.
|
||||||
|
Initial time.Duration
|
||||||
|
|
||||||
|
// Max is the maximum value of the retry envelope, defaults to 30 seconds.
|
||||||
|
Max time.Duration
|
||||||
|
|
||||||
|
// Multiplier is the factor by which the retry envelope increases.
|
||||||
|
// It should be greater than 1 and defaults to 2.
|
||||||
|
Multiplier float64
|
||||||
|
|
||||||
|
// cur is the current retry envelope
|
||||||
|
cur time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pause returns the next time.Duration that the caller should use to backoff.
|
||||||
|
func (bo *Backoff) Pause() time.Duration {
|
||||||
|
if bo.Initial == 0 {
|
||||||
|
bo.Initial = time.Second
|
||||||
|
}
|
||||||
|
if bo.cur == 0 {
|
||||||
|
bo.cur = bo.Initial
|
||||||
|
}
|
||||||
|
if bo.Max == 0 {
|
||||||
|
bo.Max = 30 * time.Second
|
||||||
|
}
|
||||||
|
if bo.Multiplier < 1 {
|
||||||
|
bo.Multiplier = 2
|
||||||
|
}
|
||||||
|
// Select a duration between 1ns and the current max. It might seem
|
||||||
|
// counterintuitive to have so much jitter, but
|
||||||
|
// https://www.awsarchitectureblog.com/2015/03/backoff.html argues that
|
||||||
|
// that is the best strategy.
|
||||||
|
d := time.Duration(1 + rand.Int63n(int64(bo.cur)))
|
||||||
|
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
|
||||||
|
if bo.cur > bo.Max {
|
||||||
|
bo.cur = bo.Max
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
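
To make the envelope behavior described above concrete, here is a small standalone sketch (not part of the vendored file; the module path comes from the go.mod shown further below). Each `Pause` call returns a random duration inside the current envelope, and the envelope grows by `Multiplier` until it is capped at `Max`.

```go
package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Envelopes grow 1s -> 2s -> 4s -> 8s and then stay capped at Max (8s);
	// each printed value is a random duration inside the current envelope.
	bo := gax.Backoff{Initial: time.Second, Max: 8 * time.Second, Multiplier: 2}
	for i := 0; i < 5; i++ {
		fmt.Println(bo.Pause())
	}
}
```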
|
||||||
|
|
||||||
|
type grpcOpt []grpc.CallOption
|
||||||
|
|
||||||
|
func (o grpcOpt) Resolve(s *CallSettings) {
|
||||||
|
s.GRPC = o
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGRPCOptions allows passing gRPC call options during client creation.
|
||||||
|
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
|
||||||
|
return grpcOpt(append([]grpc.CallOption(nil), opt...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallSettings allow fine-grained control over how calls are made.
|
||||||
|
type CallSettings struct {
|
||||||
|
// Retry returns a Retryer to be used to control retry logic of a method call.
|
||||||
|
// If Retry is nil or the returned Retryer is nil, the call will not be retried.
|
||||||
|
Retry func() Retryer
|
||||||
|
|
||||||
|
// CallOptions to be forwarded to GRPC.
|
||||||
|
GRPC []grpc.CallOption
|
||||||
|
}
|
|
@ -0,0 +1,39 @@
|
||||||
|
// Copyright 2016, Google Inc.
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// Package gax contains a set of modules which aid the development of APIs
|
||||||
|
// for clients and servers based on gRPC and Google API conventions.
|
||||||
|
//
|
||||||
|
// Application code will rarely need to use this library directly.
|
||||||
|
// However, code generated automatically from API definition files can use it
|
||||||
|
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
|
||||||
|
package gax
|
||||||
|
|
||||||
|
// Version specifies the gax-go version being used.
|
||||||
|
const Version = "2.0.4"
|
|
@ -0,0 +1,3 @@
|
||||||
|
module github.com/googleapis/gax-go/v2
|
||||||
|
|
||||||
|
require google.golang.org/grpc v1.19.0
|
|
@ -0,0 +1,25 @@
|
||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
@ -0,0 +1,53 @@
|
||||||
|
// Copyright 2018, Google Inc.
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package gax
|
||||||
|
|
||||||
|
import "bytes"
|
||||||
|
|
||||||
|
// XGoogHeader is for use by the Google Cloud Libraries only.
|
||||||
|
//
|
||||||
|
// XGoogHeader formats key-value pairs.
|
||||||
|
// The resulting string is suitable for the x-goog-api-client header.
|
||||||
|
func XGoogHeader(keyval ...string) string {
|
||||||
|
if len(keyval) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if len(keyval)%2 != 0 {
|
||||||
|
panic("gax.Header: odd argument count")
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for i := 0; i < len(keyval); i += 2 {
|
||||||
|
buf.WriteByte(' ')
|
||||||
|
buf.WriteString(keyval[i])
|
||||||
|
buf.WriteByte('/')
|
||||||
|
buf.WriteString(keyval[i+1])
|
||||||
|
}
|
||||||
|
return buf.String()[1:]
|
||||||
|
}
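
A brief illustration of the formatting this produces (assumed usage, not part of the vendored file; the key/value pairs here are placeholders):

```go
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Pairs are joined as "key/value" tokens separated by single spaces.
	fmt.Println(gax.XGoogHeader("gl-go", "1.12.5", "gax", gax.Version))
	// Output: gl-go/1.12.5 gax/2.0.4
}
```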
|
|
@ -0,0 +1,99 @@
|
||||||
|
// Copyright 2016, Google Inc.
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package gax
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APICall is a user defined call stub.
|
||||||
|
type APICall func(context.Context, CallSettings) error
|
||||||
|
|
||||||
|
// Invoke calls the given APICall,
|
||||||
|
// performing retries as specified by opts, if any.
|
||||||
|
func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
|
||||||
|
var settings CallSettings
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.Resolve(&settings)
|
||||||
|
}
|
||||||
|
return invoke(ctx, call, settings, Sleep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
|
||||||
|
// If interrupted, Sleep returns ctx.Err().
|
||||||
|
func Sleep(ctx context.Context, d time.Duration) error {
|
||||||
|
t := time.NewTimer(d)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
t.Stop()
|
||||||
|
return ctx.Err()
|
||||||
|
case <-t.C:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type sleeper func(ctx context.Context, d time.Duration) error
|
||||||
|
|
||||||
|
// invoke implements Invoke, taking an additional sleeper argument for testing.
|
||||||
|
func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
|
||||||
|
var retryer Retryer
|
||||||
|
for {
|
||||||
|
err := call(ctx, settings)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if settings.Retry == nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Never retry permanent certificate errors. (e.g. if ca-certificates
|
||||||
|
// are not installed). We should only make very few, targeted
|
||||||
|
// exceptions: many (other) status=Unavailable should be retried, such
|
||||||
|
// as if there's a network hiccup, or the internet goes out for a
|
||||||
|
// minute. This is also why here we are doing string parsing instead of
|
||||||
|
// simply making Unavailable a non-retried code elsewhere.
|
||||||
|
if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if retryer == nil {
|
||||||
|
if r := settings.Retry(); r != nil {
|
||||||
|
retryer = r
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d, ok := retryer.Retry(err); !ok {
|
||||||
|
return err
|
||||||
|
} else if err = sp(ctx, d); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
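
For orientation, a hypothetical caller-side sketch showing how `Invoke`, `WithRetry`, `OnCodes`, and `Backoff` compose; the RPC body here is a stub, not a real call.

```go
package main

import (
	"context"
	"log"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	retry := gax.WithRetry(func() gax.Retryer {
		// Retry only Unavailable, with jittered exponential backoff.
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    200 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 1.5,
		})
	})

	err := gax.Invoke(context.Background(), func(ctx context.Context, cs gax.CallSettings) error {
		// Issue the real RPC here, forwarding cs.GRPC as grpc.CallOptions.
		return nil
	}, retry)
	if err != nil {
		log.Fatal(err)
	}
}
```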
|
|
@ -71,6 +71,7 @@ can be augmented at runtime by implementing the `Getter` interface.
|
||||||
* Mercurial
|
* Mercurial
|
||||||
* HTTP
|
* HTTP
|
||||||
* Amazon S3
|
* Amazon S3
|
||||||
|
* Google GCP
|
||||||
|
|
||||||
In addition to the above protocols, go-getter has what are called "detectors."
|
In addition to the above protocols, go-getter has what are called "detectors."
|
||||||
These take a URL and attempt to automatically choose the best protocol for
|
These take a URL and attempt to automatically choose the best protocol for
|
||||||
|
@ -127,14 +128,14 @@ go-getter will first download the URL specified _before_ the double-slash
|
||||||
path after the double slash into the target directory.
|
path after the double slash into the target directory.
|
||||||
|
|
||||||
For example, if you're downloading this GitHub repository, but you only
|
For example, if you're downloading this GitHub repository, but you only
|
||||||
want to download the `test-fixtures` directory, you can do the following:
|
want to download the `testdata` directory, you can do the following:
|
||||||
|
|
||||||
```
|
```
|
||||||
https://github.com/hashicorp/go-getter.git//test-fixtures
|
https://github.com/hashicorp/go-getter.git//testdata
|
||||||
```
|
```
|
||||||
|
|
||||||
If you downloaded this to the `/tmp` directory, then the file
|
If you downloaded this to the `/tmp` directory, then the file
|
||||||
`/tmp/archive.gz` would exist. Notice that this file is in the `test-fixtures`
|
`/tmp/archive.gz` would exist. Notice that this file is in the `testdata`
|
||||||
directory in this repository, but because we specified a subdirectory,
|
directory in this repository, but because we specified a subdirectory,
|
||||||
go-getter automatically copied only that directory's contents.
|
go-getter automatically copied only that directory's contents.
|
||||||
|
|
||||||
|
@ -279,6 +280,16 @@ None
|
||||||
* `depth` - The Git clone depth. The provided number specifies the last `n`
|
* `depth` - The Git clone depth. The provided number specifies the last `n`
|
||||||
revisions to clone from the repository.
|
revisions to clone from the repository.
|
||||||
|
|
||||||
|
|
||||||
|
The `git` getter accepts both URL-style SSH addresses like
|
||||||
|
`git::ssh://git@example.com/foo/bar`, and "scp-style" addresses like
|
||||||
|
`git::git@example.com/foo/bar`. In the latter case, omitting the `git::`
|
||||||
|
force prefix is allowed if the username prefix is exactly `git@`.
|
||||||
|
|
||||||
|
The "scp-style" addresses _cannot_ be used in conjunction with the `ssh://`
|
||||||
|
scheme prefix, because in that case the colon is used to mark an optional
|
||||||
|
port number to connect on, rather than to delimit the path from the host.
|
||||||
|
|
||||||
### Mercurial (`hg`)
|
### Mercurial (`hg`)
|
||||||
|
|
||||||
* `rev` - The Mercurial revision to checkout.
|
* `rev` - The Mercurial revision to checkout.
|
||||||
|
@ -334,3 +345,14 @@ Some examples for these addressing schemes:
|
||||||
- bucket.s3-eu-west-1.amazonaws.com/foo/bar
|
- bucket.s3-eu-west-1.amazonaws.com/foo/bar
|
||||||
- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY®ion=us-east-2"
|
- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY®ion=us-east-2"
|
||||||
|
|
||||||
|
### GCS (`gcs`)
|
||||||
|
|
||||||
|
#### GCS Authentication
|
||||||
|
|
||||||
|
In order to access GCS, authentication credentials must be provided. More information can be found [here](https://cloud.google.com/docs/authentication/getting-started). A short usage sketch follows the bucket examples below.
|
||||||
|
|
||||||
|
#### GCS Bucket Examples
|
||||||
|
|
||||||
|
- gcs::https://www.googleapis.com/storage/v1/bucket
|
||||||
|
- gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip
|
||||||
|
- www.googleapis.com/storage/v1/bucket/foo
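
A hypothetical usage sketch (bucket, object, and destination are placeholders; it assumes Application Default Credentials, e.g. via `GOOGLE_APPLICATION_CREDENTIALS`, are available in the environment):

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The gcs:: prefix forces the GCS getter; the URL mirrors the examples above.
	src := "gcs::https://www.googleapis.com/storage/v1/my-bucket/module.zip"
	if err := getter.GetAny("./module", src); err != nil {
		log.Fatal(err)
	}
}
```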
|
||||||
|
|
|
@ -19,17 +19,38 @@ import (
|
||||||
urlhelper "github.com/hashicorp/go-getter/helper/url"
|
urlhelper "github.com/hashicorp/go-getter/helper/url"
|
||||||
)
|
)
|
||||||
|
|
||||||
// fileChecksum helps verifying the checksum for a file.
|
// FileChecksum helps verifying the checksum for a file.
|
||||||
type fileChecksum struct {
|
type FileChecksum struct {
|
||||||
Type string
|
Type string
|
||||||
Hash hash.Hash
|
Hash hash.Hash
|
||||||
Value []byte
|
Value []byte
|
||||||
Filename string
|
Filename string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A ChecksumError is returned when a checksum differs
|
||||||
|
type ChecksumError struct {
|
||||||
|
Hash hash.Hash
|
||||||
|
Actual []byte
|
||||||
|
Expected []byte
|
||||||
|
File string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cerr *ChecksumError) Error() string {
|
||||||
|
if cerr == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"Checksums did not match for %s.\nExpected: %s\nGot: %s\n%T",
|
||||||
|
cerr.File,
|
||||||
|
hex.EncodeToString(cerr.Expected),
|
||||||
|
hex.EncodeToString(cerr.Actual),
|
||||||
|
cerr.Hash, // ex: *sha256.digest
|
||||||
|
)
|
||||||
|
}
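
Because mismatches are now returned as a typed error rather than a formatted string, callers can inspect the digests directly. A hypothetical sketch (URL and checksum are placeholders):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	src := "https://example.com/file.zip?checksum=sha256:<hex digest>" // placeholder
	err := getter.GetFile("./file.zip", src)
	if cerr, ok := err.(*getter.ChecksumError); ok {
		fmt.Printf("checksum mismatch for %s: expected %s, got %s\n",
			cerr.File, hex.EncodeToString(cerr.Expected), hex.EncodeToString(cerr.Actual))
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
```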
|
||||||
|
|
||||||
// checksum is a simple method to compute the checksum of a source file
|
// checksum is a simple method to compute the checksum of a source file
|
||||||
// and compare it to the given expected value.
|
// and compare it to the given expected value.
|
||||||
func (c *fileChecksum) checksum(source string) error {
|
func (c *FileChecksum) checksum(source string) error {
|
||||||
f, err := os.Open(source)
|
f, err := os.Open(source)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Failed to open file for checksum: %s", err)
|
return fmt.Errorf("Failed to open file for checksum: %s", err)
|
||||||
|
@ -42,16 +63,18 @@ func (c *fileChecksum) checksum(source string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) {
|
if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) {
|
||||||
return fmt.Errorf(
|
return &ChecksumError{
|
||||||
"Checksums did not match.\nExpected: %s\nGot: %s",
|
Hash: c.Hash,
|
||||||
hex.EncodeToString(c.Value),
|
Actual: actual,
|
||||||
hex.EncodeToString(actual))
|
Expected: c.Value,
|
||||||
|
File: source,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// extractChecksum will return a fileChecksum based on the 'checksum'
|
// extractChecksum will return a FileChecksum based on the 'checksum'
|
||||||
// parameter of u.
|
// parameter of u.
|
||||||
// ex:
|
// ex:
|
||||||
// http://hashicorp.com/terraform?checksum=<checksumValue>
|
// http://hashicorp.com/terraform?checksum=<checksumValue>
|
||||||
|
@ -70,7 +93,7 @@ func (c *fileChecksum) checksum(source string) error {
|
||||||
// <checksum> *file2
|
// <checksum> *file2
|
||||||
//
|
//
|
||||||
// see parseChecksumLine for more detail on checksum file parsing
|
// see parseChecksumLine for more detail on checksum file parsing
|
||||||
func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) {
|
func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) {
|
||||||
q := u.Query()
|
q := u.Query()
|
||||||
v := q.Get("checksum")
|
v := q.Get("checksum")
|
||||||
|
|
||||||
|
@ -92,14 +115,14 @@ func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) {
|
||||||
|
|
||||||
switch checksumType {
|
switch checksumType {
|
||||||
case "file":
|
case "file":
|
||||||
return c.checksumFromFile(checksumValue, u)
|
return c.ChecksumFromFile(checksumValue, u)
|
||||||
default:
|
default:
|
||||||
return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath()))
|
return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newChecksum(checksumValue, filename string) (*fileChecksum, error) {
|
func newChecksum(checksumValue, filename string) (*FileChecksum, error) {
|
||||||
c := &fileChecksum{
|
c := &FileChecksum{
|
||||||
Filename: filename,
|
Filename: filename,
|
||||||
}
|
}
|
||||||
var err error
|
var err error
|
||||||
|
@ -110,7 +133,7 @@ func newChecksum(checksumValue, filename string) (*fileChecksum, error) {
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChecksum, error) {
|
func newChecksumFromType(checksumType, checksumValue, filename string) (*FileChecksum, error) {
|
||||||
c, err := newChecksum(checksumValue, filename)
|
c, err := newChecksum(checksumValue, filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -134,7 +157,7 @@ func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChe
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error) {
|
func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) {
|
||||||
c, err := newChecksum(checksumValue, filename)
|
c, err := newChecksum(checksumValue, filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -160,14 +183,14 @@ func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error)
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// checksumsFromFile will return all the fileChecksums found in file
|
// ChecksumFromFile will return all the FileChecksums found in file
|
||||||
//
|
//
|
||||||
// checksumsFromFile will try to guess the hashing algorithm based on content
|
// ChecksumFromFile will try to guess the hashing algorithm based on content
|
||||||
// of checksum file
|
// of checksum file
|
||||||
//
|
//
|
||||||
// checksumsFromFile will only return checksums for files that match file
|
// ChecksumFromFile will only return checksums for files that match file
|
||||||
// behind src
|
// behind src
|
||||||
func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileChecksum, error) {
|
func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) {
|
||||||
checksumFileURL, err := urlhelper.Parse(checksumFile)
|
checksumFileURL, err := urlhelper.Parse(checksumFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -263,7 +286,7 @@ func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileCheck
|
||||||
// of a line.
|
// of a line.
|
||||||
// for BSD type sums parseChecksumLine guesses the hashing algorithm
|
// for BSD type sums parseChecksumLine guesses the hashing algorithm
|
||||||
// by checking the length of the checksum.
|
// by checking the length of the checksum.
|
||||||
func parseChecksumLine(line string) (*fileChecksum, error) {
|
func parseChecksumLine(line string) (*FileChecksum, error) {
|
||||||
parts := strings.Fields(line)
|
parts := strings.Fields(line)
|
||||||
|
|
||||||
switch len(parts) {
|
switch len(parts) {
|
||||||
|
|
|
@ -8,7 +8,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// ZipDecompressor is an implementation of Decompressor that can
|
// ZipDecompressor is an implementation of Decompressor that can
|
||||||
// decompress tar.gzip files.
|
// decompress zip files.
|
||||||
type ZipDecompressor struct{}
|
type ZipDecompressor struct{}
|
||||||
|
|
||||||
func (d *ZipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
|
func (d *ZipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error {
|
||||||
|
|
|
@ -26,6 +26,7 @@ func init() {
|
||||||
new(GitDetector),
|
new(GitDetector),
|
||||||
new(BitBucketDetector),
|
new(BitBucketDetector),
|
||||||
new(S3Detector),
|
new(S3Detector),
|
||||||
|
new(GCSDetector),
|
||||||
new(FileDetector),
|
new(FileDetector),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,43 @@
package getter

import (
	"fmt"
	"net/url"
	"strings"
)

// GCSDetector implements Detector to detect GCS URLs and turn
// them into URLs that the GCSGetter can understand.
type GCSDetector struct{}

func (d *GCSDetector) Detect(src, _ string) (string, bool, error) {
	if len(src) == 0 {
		return "", false, nil
	}

	if strings.Contains(src, "googleapis.com/") {
		return d.detectHTTP(src)
	}

	return "", false, nil
}

func (d *GCSDetector) detectHTTP(src string) (string, bool, error) {

	parts := strings.Split(src, "/")
	if len(parts) < 5 {
		return "", false, fmt.Errorf(
			"URL is not a valid GCS URL")
	}
	version := parts[2]
	bucket := parts[3]
	object := strings.Join(parts[4:], "/")

	url, err := url.Parse(fmt.Sprintf("https://www.googleapis.com/storage/%s/%s/%s",
		version, bucket, object))
	if err != nil {
		return "", false, fmt.Errorf("error parsing GCS URL: %s", err)
	}

	return "gcs::" + url.String(), true, nil
}
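A short sketch of what the new detector does to a source string, assuming a go-getter build that includes this file; the bucket and object names are made up, and detectors are normally handed scheme-less sources by the top-level detection step:

```go
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	d := new(getter.GCSDetector)

	// Hypothetical bucket and object; note there is no scheme on the input.
	src := "www.googleapis.com/storage/v1/my-bucket/modules/network.zip"

	result, ok, err := d.Detect(src, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, result)
	// With the parsing above this yields:
	// true gcs::https://www.googleapis.com/storage/v1/my-bucket/modules/network.zip
}
```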
@ -67,6 +67,7 @@ func init() {
	Getters = map[string]Getter{
		"file": new(FileGetter),
		"git":  new(GitGetter),
+		"gcs":  new(GCSGetter),
		"hg":   new(HgGetter),
		"s3":   new(S3Getter),
		"http": httpGetter,
@ -112,19 +112,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
	}

	// Copy
-	srcF, err := os.Open(path)
+	_, err = copyFile(ctx, dst, path, 0666, g.client.umask())
-	if err != nil {
-		return err
-	}
-	defer srcF.Close()
-
-	dstF, err := os.Create(dst)
-	if err != nil {
-		return err
-	}
-	defer dstF.Close()
-
-	_, err = Copy(ctx, dstF, srcF)
	return err
}
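copyFile is an internal helper whose body is not part of this hunk; only its call shape (context, destination, source path, mode, umask) is visible here, and it is paired with a copyReader variant used by the S3 and GCS getters further down. A hypothetical stand-in, not go-getter's actual code, that applies the umask in the same spirit:

```go
package main

import (
	"io"
	"log"
	"os"
)

// copyWithUmask is an illustration only: create dst with the requested mode
// masked by umask, then stream src into it. The real helper is also
// context-aware so copies can be cancelled.
func copyWithUmask(dst, src string, mode, umask os.FileMode) (int64, error) {
	srcF, err := os.Open(src)
	if err != nil {
		return 0, err
	}
	defer srcF.Close()

	dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode&^umask)
	if err != nil {
		return 0, err
	}
	defer dstF.Close()

	return io.Copy(dstF, srcF)
}

func main() {
	// Placeholder paths for the demonstration.
	if _, err := copyWithUmask("./dst.txt", "./src.txt", 0666, 0022); err != nil {
		log.Fatal(err)
	}
}
```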
@ -0,0 +1,165 @@
package getter

import (
	"context"
	"fmt"
	"net/url"
	"os"
	"path/filepath"
	"strings"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// GCSGetter is a Getter implementation that will download a module from
// a GCS bucket.
type GCSGetter struct {
	getter
}

func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) {
	ctx := g.Context()

	// Parse URL
	bucket, object, err := g.parseURL(u)
	if err != nil {
		return 0, err
	}

	client, err := storage.NewClient(ctx)
	if err != nil {
		return 0, err
	}
	iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object})
	for {
		obj, err := iter.Next()
		if err != nil && err != iterator.Done {
			return 0, err
		}

		if err == iterator.Done {
			break
		}
		if strings.HasSuffix(obj.Name, "/") {
			// A directory matched the prefix search, so this must be a directory
			return ClientModeDir, nil
		} else if obj.Name != object {
			// A file matched the prefix search and doesn't have the same name
			// as the query, so this must be a directory
			return ClientModeDir, nil
		}
	}
	// There are no directories or subdirectories, and if a match was returned,
	// it was exactly equal to the prefix search. So return File mode
	return ClientModeFile, nil
}

func (g *GCSGetter) Get(dst string, u *url.URL) error {
	ctx := g.Context()

	// Parse URL
	bucket, object, err := g.parseURL(u)
	if err != nil {
		return err
	}

	// Remove destination if it already exists
	_, err = os.Stat(dst)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if err == nil {
		// Remove the destination
		if err := os.RemoveAll(dst); err != nil {
			return err
		}
	}

	// Create all the parent directories
	if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
		return err
	}

	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}

	// Iterate through all matching objects.
	iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object})
	for {
		obj, err := iter.Next()
		if err != nil && err != iterator.Done {
			return err
		}
		if err == iterator.Done {
			break
		}

		if !strings.HasSuffix(obj.Name, "/") {
			// Get the object destination path
			objDst, err := filepath.Rel(object, obj.Name)
			if err != nil {
				return err
			}
			objDst = filepath.Join(dst, objDst)
			// Download the matching object.
			err = g.getObject(ctx, client, objDst, bucket, obj.Name)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (g *GCSGetter) GetFile(dst string, u *url.URL) error {
	ctx := g.Context()

	// Parse URL
	bucket, object, err := g.parseURL(u)
	if err != nil {
		return err
	}

	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	return g.getObject(ctx, client, dst, bucket, object)
}

func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object string) error {
	rc, err := client.Bucket(bucket).Object(object).NewReader(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()

	// Create all the parent directories
	if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil {
		return err
	}

	return copyReader(dst, rc, 0666, g.client.umask())
}

func (g *GCSGetter) parseURL(u *url.URL) (bucket, path string, err error) {
	if strings.Contains(u.Host, "googleapis.com") {
		hostParts := strings.Split(u.Host, ".")
		if len(hostParts) != 3 {
			err = fmt.Errorf("URL is not a valid GCS URL")
			return
		}

		pathParts := strings.SplitN(u.Path, "/", 5)
		if len(pathParts) != 5 {
			err = fmt.Errorf("URL is not a valid GCS URL")
			return
		}
		bucket = pathParts[3]
		path = pathParts[4]
	}
	return
}
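Putting the pieces together, a minimal sketch of fetching through the new getter via the high-level client; the bucket, object, and destination are placeholders, and Google credentials are assumed to come from the usual Application Default Credentials lookup performed by storage.NewClient:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// "gcs::" forces the new getter, matching the Getters map entry added above.
	client := &getter.Client{
		Src:  "gcs::https://www.googleapis.com/storage/v1/my-bucket/modules/network.zip",
		Dst:  "./network",
		Mode: getter.ClientModeAny,
	}
	if err := client.Get(); err != nil {
		log.Fatal(err)
	}
}
```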
@ -34,6 +34,15 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
		return fmt.Errorf("git must be available and on the PATH")
	}

+	// The port number must be parseable as an integer. If not, the user
+	// was probably trying to use a scp-style address, in which case the
+	// ssh:// prefix must be removed to indicate that.
+	if portStr := u.Port(); portStr != "" {
+		if _, err := strconv.ParseUint(portStr, 10, 16); err != nil {
+			return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr)
+		}
+	}
+
	// Extract some query parameters we use
	var ref, sshKey string
	var depth int

@ -90,26 +99,6 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
		}
	}

-	// For SSH-style URLs, if they use the SCP syntax of host:path, then
-	// the URL will be mangled. We detect that here and correct the path.
-	// Example: host:path/bar will turn into host/path/bar
-	if u.Scheme == "ssh" {
-		if idx := strings.Index(u.Host, ":"); idx > -1 {
-			// Copy the URL so we don't modify the input
-			var newU url.URL = *u
-			u = &newU
-
-			// Path includes the part after the ':'.
-			u.Path = u.Host[idx+1:] + u.Path
-			if u.Path[0] != '/' {
-				u.Path = "/" + u.Path
-			}
-
-			// Host trims up to the :
-			u.Host = u.Host[:idx]
-		}
-	}
-
	// Clone or update the repository
	_, err := os.Stat(dst)
	if err != nil && !os.IsNotExist(err) {
@ -169,14 +169,7 @@ func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, ke
		return err
	}

-	f, err := os.Create(dst)
+	return copyReader(dst, resp.Body, 0666, g.client.umask())
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	_, err = Copy(ctx, f, resp.Body)
-	return err
}

func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config {
@ -1,10 +1,10 @@
module github.com/hashicorp/go-getter

require (
+	cloud.google.com/go v0.36.0
	github.com/aws/aws-sdk-go v1.15.78
	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
	github.com/cheggaaa/pb v1.0.27
-	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/fatih/color v1.7.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.0
	github.com/hashicorp/go-safetemp v1.0.0

@ -14,11 +14,9 @@ require (
	github.com/mattn/go-runewidth v0.0.4 // indirect
	github.com/mitchellh/go-homedir v1.0.0
	github.com/mitchellh/go-testing-interface v1.0.0
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/stretchr/testify v1.2.2 // indirect
	github.com/ulikunitz/xz v0.5.5
	golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect
	golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 // indirect
-	golang.org/x/text v0.3.0 // indirect
+	google.golang.org/api v0.1.0
	gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect
)
@ -1,44 +1,182 @@
|
||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
|
||||||
|
cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
|
||||||
|
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||||
|
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||||
|
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||||
|
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||||
|
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||||
github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY=
|
github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY=
|
||||||
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
|
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
|
||||||
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
|
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
|
||||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
|
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
|
||||||
|
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||||
github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc=
|
github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc=
|
||||||
github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
|
github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||||
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||||
|
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||||
|
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
|
||||||
|
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||||
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
|
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
|
||||||
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
|
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
|
||||||
github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8=
|
|
||||||
github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
|
||||||
github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
|
github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
|
||||||
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||||
|
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
|
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
|
||||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
|
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
|
||||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
|
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
|
||||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||||
|
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||||
|
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||||
|
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
|
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
|
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||||
|
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||||
|
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||||
|
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||||
|
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||||
|
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||||
|
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||||
|
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||||
|
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||||
|
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||||
|
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||||
|
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||||
|
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||||
|
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||||
|
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||||
|
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||||
|
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||||
|
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||||
|
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||||
|
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
|
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||||
|
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||||
|
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||||
|
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||||
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
|
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
|
||||||
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||||
|
go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938=
|
||||||
|
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||||
|
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||||
|
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||||
|
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 h1:0oC8rFnE+74kEmuHZ46F6KHsMr5Gx2gUQPuNz28iQZM=
|
golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 h1:0oC8rFnE+74kEmuHZ46F6KHsMr5Gx2gUQPuNz28iQZM=
|
||||||
golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||||
|
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||||
|
google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI=
|
||||||
|
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk=
|
||||||
|
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||||
|
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8=
|
||||||
|
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4=
|
||||||
|
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
|
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||||
|
google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
|
||||||
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo=
|
gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo=
|
||||||
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||||
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||||
|
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||||
|
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||||
|
|
|
@ -0,0 +1 @@
Google Inc.
@ -0,0 +1,63 @@
# How to contribute

We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.

## Contributor License Agreement

Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.

You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.

## Code reviews

All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.

[GitHub Help]: https://help.github.com/articles/about-pull-requests/

## Instructions

Fork the repo, then check out the upstream repo into your GOPATH:

```
$ go get -d go.opencensus.io
```

Add your fork as an origin:

```
cd $(go env GOPATH)/src/go.opencensus.io
git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
```

Run tests:

```
$ make install-tools # Only first time.
$ make
```

Check out a new branch, make your modifications, and push the branch to your fork:

```
$ git checkout -b feature
# edit files
$ git commit
$ git push fork feature
```

Open a pull request against the main opencensus-go repo.

## General Notes

This project uses Appveyor and Travis for CI.

The dependencies are managed with `go mod`. If you work with the sources under your
`$GOPATH`, you need to set the environment variable `GO111MODULE=on`.
@ -0,0 +1,231 @@
|
||||||
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c"
|
||||||
|
name = "git.apache.org/thrift.git"
|
||||||
|
packages = ["lib/go/thrift"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "6e67faa92827ece022380b211c2caaadd6145bf5"
|
||||||
|
source = "github.com/apache/thrift"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
|
||||||
|
name = "github.com/beorn7/perks"
|
||||||
|
packages = ["quantile"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
|
||||||
|
name = "github.com/golang/protobuf"
|
||||||
|
packages = [
|
||||||
|
"proto",
|
||||||
|
"ptypes",
|
||||||
|
"ptypes/any",
|
||||||
|
"ptypes/duration",
|
||||||
|
"ptypes/timestamp",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||||
|
version = "v1.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
|
||||||
|
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||||
|
packages = ["pbutil"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||||
|
version = "v1.0.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1"
|
||||||
|
name = "github.com/openzipkin/zipkin-go"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"idgenerator",
|
||||||
|
"model",
|
||||||
|
"propagation",
|
||||||
|
"reporter",
|
||||||
|
"reporter/http",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "d455a5674050831c1e187644faa4046d653433c2"
|
||||||
|
version = "v0.1.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb"
|
||||||
|
name = "github.com/prometheus/client_golang"
|
||||||
|
packages = [
|
||||||
|
"prometheus",
|
||||||
|
"prometheus/promhttp",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||||
|
version = "v0.8.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
|
||||||
|
name = "github.com/prometheus/client_model"
|
||||||
|
packages = ["go"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
|
||||||
|
name = "github.com/prometheus/common"
|
||||||
|
packages = [
|
||||||
|
"expfmt",
|
||||||
|
"internal/bitbucket.org/ww/goautoneg",
|
||||||
|
"model",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
|
||||||
|
name = "github.com/prometheus/procfs"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal/util",
|
||||||
|
"nfs",
|
||||||
|
"xfs",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
packages = [
|
||||||
|
"context",
|
||||||
|
"http/httpguts",
|
||||||
|
"http2",
|
||||||
|
"http2/hpack",
|
||||||
|
"idna",
|
||||||
|
"internal/timeseries",
|
||||||
|
"trace",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "922f4815f713f213882e8ef45e0d315b164d705c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a"
|
||||||
|
name = "golang.org/x/sync"
|
||||||
|
packages = ["semaphore"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8"
|
||||||
|
name = "golang.org/x/sys"
|
||||||
|
packages = ["unix"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
|
||||||
|
name = "golang.org/x/text"
|
||||||
|
packages = [
|
||||||
|
"collate",
|
||||||
|
"collate/build",
|
||||||
|
"internal/colltab",
|
||||||
|
"internal/gen",
|
||||||
|
"internal/tag",
|
||||||
|
"internal/triegen",
|
||||||
|
"internal/ucd",
|
||||||
|
"language",
|
||||||
|
"secure/bidirule",
|
||||||
|
"transform",
|
||||||
|
"unicode/bidi",
|
||||||
|
"unicode/cldr",
|
||||||
|
"unicode/norm",
|
||||||
|
"unicode/rangetable",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530"
|
||||||
|
name = "google.golang.org/api"
|
||||||
|
packages = ["support/bundler"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "e21acd801f91da814261b938941d193bb036441a"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
|
||||||
|
name = "google.golang.org/genproto"
|
||||||
|
packages = ["googleapis/rpc/status"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d"
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"balancer",
|
||||||
|
"balancer/base",
|
||||||
|
"balancer/roundrobin",
|
||||||
|
"codes",
|
||||||
|
"connectivity",
|
||||||
|
"credentials",
|
||||||
|
"encoding",
|
||||||
|
"encoding/proto",
|
||||||
|
"grpclog",
|
||||||
|
"internal",
|
||||||
|
"internal/backoff",
|
||||||
|
"internal/channelz",
|
||||||
|
"internal/envconfig",
|
||||||
|
"internal/grpcrand",
|
||||||
|
"internal/transport",
|
||||||
|
"keepalive",
|
||||||
|
"metadata",
|
||||||
|
"naming",
|
||||||
|
"peer",
|
||||||
|
"resolver",
|
||||||
|
"resolver/dns",
|
||||||
|
"resolver/passthrough",
|
||||||
|
"stats",
|
||||||
|
"status",
|
||||||
|
"tap",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
|
||||||
|
version = "v1.14.0"
|
||||||
|
|
||||||
|
[solve-meta]
|
||||||
|
analyzer-name = "dep"
|
||||||
|
analyzer-version = 1
|
||||||
|
input-imports = [
|
||||||
|
"git.apache.org/thrift.git/lib/go/thrift",
|
||||||
|
"github.com/golang/protobuf/proto",
|
||||||
|
"github.com/openzipkin/zipkin-go",
|
||||||
|
"github.com/openzipkin/zipkin-go/model",
|
||||||
|
"github.com/openzipkin/zipkin-go/reporter",
|
||||||
|
"github.com/openzipkin/zipkin-go/reporter/http",
|
||||||
|
"github.com/prometheus/client_golang/prometheus",
|
||||||
|
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||||
|
"golang.org/x/net/context",
|
||||||
|
"golang.org/x/net/http2",
|
||||||
|
"google.golang.org/api/support/bundler",
|
||||||
|
"google.golang.org/grpc",
|
||||||
|
"google.golang.org/grpc/codes",
|
||||||
|
"google.golang.org/grpc/grpclog",
|
||||||
|
"google.golang.org/grpc/metadata",
|
||||||
|
"google.golang.org/grpc/stats",
|
||||||
|
"google.golang.org/grpc/status",
|
||||||
|
]
|
||||||
|
solver-name = "gps-cdcl"
|
||||||
|
solver-version = 1
|
|
@ -0,0 +1,36 @@
# For v0.x.y dependencies, prefer adding a constraint of the form: version=">= 0.x.y"
# to avoid locking to a particular minor version which can cause dep to not be
# able to find a satisfying dependency graph.

[[constraint]]
  branch = "master"
  name = "git.apache.org/thrift.git"
  source = "github.com/apache/thrift"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "1.0.0"

[[constraint]]
  name = "github.com/openzipkin/zipkin-go"
  version = ">=0.1.0"

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = ">=0.8.0"

[[constraint]]
  branch = "master"
  name = "golang.org/x/net"

[[constraint]]
  branch = "master"
  name = "google.golang.org/api"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.11.3"

[prune]
  go-tests = true
  unused-packages = true
@ -0,0 +1,202 @@
(Vendored Apache License, Version 2.0: full text identical to the license reproduced at the start of this change.)
@ -0,0 +1,96 @@
|
||||||
|
# TODO: Fix this on windows.
|
||||||
|
ALL_SRC := $(shell find . -name '*.go' \
|
||||||
|
-not -path './vendor/*' \
|
||||||
|
-not -path '*/gen-go/*' \
|
||||||
|
-type f | sort)
|
||||||
|
ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
|
||||||
|
|
||||||
|
GOTEST_OPT?=-v -race -timeout 30s
|
||||||
|
GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic
|
||||||
|
GOTEST=go test
|
||||||
|
GOFMT=gofmt
|
||||||
|
GOLINT=golint
|
||||||
|
GOVET=go vet
|
||||||
|
EMBEDMD=embedmd
|
||||||
|
# TODO decide if we need to change these names.
|
||||||
|
TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages"
|
||||||
|
TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages"
|
||||||
|
README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ')
|
||||||
|
|
||||||
|
.DEFAULT_GOAL := fmt-lint-vet-embedmd-test
|
||||||
|
|
||||||
|
.PHONY: fmt-lint-vet-embedmd-test
|
||||||
|
fmt-lint-vet-embedmd-test: fmt lint vet embedmd test
|
||||||
|
|
||||||
|
# TODO enable test-with-coverage in travis
|
||||||
|
.PHONY: travis-ci
|
||||||
|
travis-ci: fmt lint vet embedmd test test-386
|
||||||
|
|
||||||
|
all-pkgs:
|
||||||
|
@echo $(ALL_PKGS) | tr ' ' '\n' | sort
|
||||||
|
|
||||||
|
all-srcs:
|
||||||
|
@echo $(ALL_SRC) | tr ' ' '\n' | sort
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
$(GOTEST) $(GOTEST_OPT) $(ALL_PKGS)
|
||||||
|
|
||||||
|
.PHONY: test-386
|
||||||
|
test-386:
|
||||||
|
GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS)
|
||||||
|
|
||||||
|
.PHONY: test-with-coverage
|
||||||
|
test-with-coverage:
|
||||||
|
$(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
|
||||||
|
|
||||||
|
.PHONY: fmt
|
||||||
|
fmt:
|
||||||
|
@FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \
|
||||||
|
if [ "$$FMTOUT" ]; then \
|
||||||
|
echo "$(GOFMT) FAILED => gofmt the following files:\n"; \
|
||||||
|
echo "$$FMTOUT\n"; \
|
||||||
|
exit 1; \
|
||||||
|
else \
|
||||||
|
echo "Fmt finished successfully"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: lint
|
||||||
|
lint:
|
||||||
|
@LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \
|
||||||
|
if [ "$$LINTOUT" ]; then \
|
||||||
|
echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \
|
||||||
|
echo "$$LINTOUT\n"; \
|
||||||
|
exit 1; \
|
||||||
|
else \
|
||||||
|
echo "Lint finished successfully"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: vet
|
||||||
|
vet:
|
||||||
|
# TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0"
|
||||||
|
@VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \
|
||||||
|
if [ "$$VETOUT" ]; then \
|
||||||
|
echo "$(GOVET) FAILED => go vet the following files:\n"; \
|
||||||
|
echo "$$VETOUT\n"; \
|
||||||
|
exit 1; \
|
||||||
|
else \
|
||||||
|
echo "Vet finished successfully"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: embedmd
|
||||||
|
embedmd:
|
||||||
|
@EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \
|
||||||
|
if [ "$$EMBEDMDOUT" ]; then \
|
||||||
|
echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \
|
||||||
|
echo "$$EMBEDMDOUT\n"; \
|
||||||
|
exit 1; \
|
||||||
|
else \
|
||||||
|
echo "Embedmd finished successfully"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: install-tools
|
||||||
|
install-tools:
|
||||||
|
go get -u golang.org/x/tools/cmd/cover
|
||||||
|
go get -u golang.org/x/lint/golint
|
||||||
|
go get -u github.com/rakyll/embedmd
|
|
@ -0,0 +1,263 @@
|
||||||
|
# OpenCensus Libraries for Go
|
||||||
|
|
||||||
|
[![Build Status][travis-image]][travis-url]
|
||||||
|
[![Windows Build Status][appveyor-image]][appveyor-url]
|
||||||
|
[![GoDoc][godoc-image]][godoc-url]
|
||||||
|
[![Gitter chat][gitter-image]][gitter-url]
|
||||||
|
|
||||||
|
OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
|
||||||
|
collecting application performance and behavior monitoring data.
|
||||||
|
Currently it consists of three major components: tags, stats and tracing.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go get -u go.opencensus.io
|
||||||
|
```
|
||||||
|
|
||||||
|
The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
|
||||||
|
The use of vendoring or a dependency management tool is recommended.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
OpenCensus Go libraries require Go 1.8 or later.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
The easiest way to get started using OpenCensus in your application is to use an existing
|
||||||
|
integration with your RPC framework:
|
||||||
|
|
||||||
|
* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
|
||||||
|
* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
|
||||||
|
* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
|
||||||
|
* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
|
||||||
|
* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
|
||||||
|
* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
|
||||||
|
* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
|
||||||
|
* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
|
||||||
|
* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
|
||||||
|
* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
|
||||||
|
|
||||||
|
If you're using a framework not listed here, you could either implement your own middleware for your
|
||||||
|
framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
|
||||||
|
|
||||||
|
## Exporters
|
||||||
|
|
||||||
|
OpenCensus can export instrumentation data to various backends.
|
||||||
|
OpenCensus has exporter implementations for the backends listed below; users
can also implement their own exporters by implementing the exporter interfaces
([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
[trace](https://godoc.org/go.opencensus.io/trace#Exporter)); a minimal custom
exporter sketch follows the list:
|
||||||
|
|
||||||
|
* [Prometheus][exporter-prom] for stats
|
||||||
|
* [OpenZipkin][exporter-zipkin] for traces
|
||||||
|
* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
|
||||||
|
* [Jaeger][exporter-jaeger] for traces
|
||||||
|
* [AWS X-Ray][exporter-xray] for traces
|
||||||
|
* [Datadog][exporter-datadog] for stats and traces
|
||||||
|
* [Graphite][exporter-graphite] for stats
|
||||||
|
* [Honeycomb][exporter-honeycomb] for traces
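
If you need a backend that is not listed, a minimal, hedged sketch of a custom
trace exporter looks like the following (the `printExporter` type and its log
output are illustrative only; it assumes just the `trace.Exporter` interface,
`trace.RegisterExporter`, and the documented `trace.SpanData` fields):

```go
package main

import (
	"log"

	"go.opencensus.io/trace"
)

// printExporter is a hypothetical exporter that simply logs finished spans.
type printExporter struct{}

// ExportSpan satisfies the trace.Exporter interface.
func (e *printExporter) ExportSpan(sd *trace.SpanData) {
	log.Printf("span %q took %v", sd.Name, sd.EndTime.Sub(sd.StartTime))
}

func main() {
	// Register the exporter so it receives every exported span.
	trace.RegisterExporter(&printExporter{})
}
```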
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)
|
||||||
|
|
||||||
|
In a microservices environment, a user request may go through
|
||||||
|
multiple services until there is a response. OpenCensus allows
|
||||||
|
you to instrument your services and collect diagnostics data all
|
||||||
|
through your services end-to-end.
|
||||||
|
|
||||||
|
## Tags
|
||||||
|
|
||||||
|
Tags represent propagated key-value pairs. They are propagated using `context.Context`
|
||||||
|
in the same process or can be encoded to be transmitted on the wire. Usually, this will
|
||||||
|
be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
|
||||||
|
for gRPC.
|
||||||
|
|
||||||
|
Package `tag` allows adding or modifying tags in the current context.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/tags.go new)
|
||||||
|
```go
|
||||||
|
ctx, err := tag.New(ctx,
|
||||||
|
tag.Insert(osKey, "macOS-10.12.5"),
|
||||||
|
tag.Upsert(userIDKey, "cde36753ed"),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Stats
|
||||||
|
|
||||||
|
OpenCensus is a low-overhead framework even when instrumentation is always enabled.
To achieve this, it is optimized to make recording of data points fast
and separate from data aggregation.
|
||||||
|
|
||||||
|
OpenCensus stats collection happens in two stages:
|
||||||
|
|
||||||
|
* Definition of measures and recording of data points
|
||||||
|
* Definition of views and aggregation of the recorded data
|
||||||
|
|
||||||
|
### Recording
|
||||||
|
|
||||||
|
Measurements are data points associated with a measure.
|
||||||
|
Recording implicitly tags the set of Measurements with the tags from the
|
||||||
|
provided context:
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/stats.go record)
|
||||||
|
```go
|
||||||
|
stats.Record(ctx, videoSize.M(102478))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Views
|
||||||
|
|
||||||
|
Views are how Measures are aggregated. You can think of them as queries over the
|
||||||
|
set of recorded data points (measurements).
|
||||||
|
|
||||||
|
Views have two parts: the tags to group by and the aggregation type used.
|
||||||
|
|
||||||
|
Currently three types of aggregations are supported:
|
||||||
|
* CountAggregation is used to count the number of times a sample was recorded.
|
||||||
|
* DistributionAggregation is used to provide a histogram of the values of the samples.
|
||||||
|
* SumAggregation is used to sum up all sample values.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/stats.go aggs)
|
||||||
|
```go
|
||||||
|
distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
|
||||||
|
countAgg := view.Count()
|
||||||
|
sumAgg := view.Sum()
|
||||||
|
```
|
||||||
|
|
||||||
|
Here we create a view with the DistributionAggregation over our measure.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/stats.go view)
|
||||||
|
```go
|
||||||
|
if err := view.Register(&view.View{
|
||||||
|
Name: "example.com/video_size_distribution",
|
||||||
|
Description: "distribution of processed video size over time",
|
||||||
|
Measure: videoSize,
|
||||||
|
Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
|
||||||
|
}); err != nil {
|
||||||
|
log.Fatalf("Failed to register view: %v", err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Register begins collecting data for the view. Registered views' data will be
|
||||||
|
exported via the registered exporters.
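
As a hedged illustration of how registered view data reaches exporters, the
sketch below wires up a hypothetical `logExporter` (only the `view.Exporter`
interface, `view.RegisterExporter`, and `view.SetReportingPeriod` are assumed
from the package API):

```go
package main

import (
	"log"
	"time"

	"go.opencensus.io/stats/view"
)

// logExporter is a hypothetical exporter that logs exported view data.
type logExporter struct{}

// ExportView satisfies the view.Exporter interface.
func (e *logExporter) ExportView(vd *view.Data) {
	log.Printf("view %q: %d rows", vd.View.Name, len(vd.Rows))
}

func main() {
	view.RegisterExporter(&logExporter{})
	// Control how often registered view data is reported to exporters.
	view.SetReportingPeriod(10 * time.Second)
}
```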
|
||||||
|
|
||||||
|
## Traces
|
||||||
|
|
||||||
|
A distributed trace tracks the progression of a single user request as
|
||||||
|
it is handled by the services and processes that make up an application.
|
||||||
|
Each step is called a span in the trace. Spans include metadata about the step,
|
||||||
|
including especially the time spent in the step, called the span’s latency.
|
||||||
|
|
||||||
|
Below you see a trace and several spans underneath it.
|
||||||
|
|
||||||
|
![Traces and spans](https://i.imgur.com/7hZwRVj.png)
|
||||||
|
|
||||||
|
### Spans
|
||||||
|
|
||||||
|
Span is the unit step in a trace. Each span has a name, latency, status and
|
||||||
|
additional metadata.
|
||||||
|
|
||||||
|
Below we are starting a span for a cache read and ending it
|
||||||
|
when we are done:
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/trace.go startend)
|
||||||
|
```go
|
||||||
|
ctx, span := trace.StartSpan(ctx, "cache.Get")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
// Do work to get from cache.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Propagation
|
||||||
|
|
||||||
|
Spans can have parents or can be root spans if they don't have any parents.
|
||||||
|
The current span is propagated in-process and across the network to allow associating
|
||||||
|
new child spans with the parent.
|
||||||
|
|
||||||
|
In the same process, `context.Context` is used to propagate spans.
|
||||||
|
`trace.StartSpan` creates a new span as a root if the current context
doesn't contain a span; otherwise, it creates a child of the span that is
already in the current context. The returned context can be used to keep
propagating the newly created span in the current context.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/trace.go startend)
|
||||||
|
```go
|
||||||
|
ctx, span := trace.StartSpan(ctx, "cache.Get")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
// Do work to get from cache.
|
||||||
|
```
|
||||||
|
|
||||||
|
Across the network, OpenCensus provides different propagation
|
||||||
|
methods for different protocols.
|
||||||
|
|
||||||
|
* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
|
||||||
|
* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
|
||||||
|
by default but can be configured to use a custom propagation method by setting another
|
||||||
|
[propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
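
A hedged sketch of overriding the default HTTP propagation format is shown
below; it assumes the `tracecontext` propagation package ships with your
version of the library, and the `:8080` address is only an example:

```go
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
)

func main() {
	// Outgoing requests: inject the span context using the chosen format.
	client := &http.Client{
		Transport: &ochttp.Transport{Propagation: &tracecontext.HTTPFormat{}},
	}
	_ = client

	// Incoming requests: extract the parent span using the same format.
	handler := &ochttp.Handler{
		Handler:     http.DefaultServeMux,
		Propagation: &tracecontext.HTTPFormat{},
	}
	log.Fatal(http.ListenAndServe(":8080", handler))
}
```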
|
||||||
|
|
||||||
|
## Execution Tracer
|
||||||
|
|
||||||
|
With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
|
||||||
|
See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
|
||||||
|
for an example of their mutual use.
|
||||||
|
|
||||||
|
## Profiles
|
||||||
|
|
||||||
|
OpenCensus tags can be applied as profiler labels
|
||||||
|
for users who are on Go 1.9 and above.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/tags.go profiler)
|
||||||
|
```go
|
||||||
|
ctx, err = tag.New(ctx,
|
||||||
|
tag.Insert(osKey, "macOS-10.12.5"),
|
||||||
|
tag.Insert(userIDKey, "fff0989878"),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
tag.Do(ctx, func(ctx context.Context) {
|
||||||
|
// Do work.
|
||||||
|
// When profiling is on, samples will be
|
||||||
|
// recorded with the key/values from the tag map.
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
A screenshot of the CPU profile from the program above:
|
||||||
|
|
||||||
|
![CPU profile](https://i.imgur.com/jBKjlkw.png)
|
||||||
|
|
||||||
|
## Deprecation Policy
|
||||||
|
|
||||||
|
Before version 1.0.0, the following deprecation policy will be observed:
|
||||||
|
|
||||||
|
No backwards-incompatible changes will be made except for the removal of symbols that have
|
||||||
|
been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
|
||||||
|
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
|
||||||
|
release in which the functionality was marked *Deprecated*.
|
||||||
|
|
||||||
|
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
|
||||||
|
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
|
||||||
|
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
|
||||||
|
[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
|
||||||
|
[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
|
||||||
|
[godoc-url]: https://godoc.org/go.opencensus.io
|
||||||
|
[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
|
||||||
|
[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
|
||||||
|
|
||||||
|
|
||||||
|
[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
|
||||||
|
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
|
||||||
|
|
||||||
|
[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
|
||||||
|
[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
|
||||||
|
[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
|
||||||
|
[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
|
||||||
|
[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
|
||||||
|
[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
|
||||||
|
[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
|
||||||
|
[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
|
|
@ -0,0 +1,25 @@
|
||||||
|
version: "{build}"
|
||||||
|
|
||||||
|
platform: x64
|
||||||
|
|
||||||
|
clone_folder: c:\gopath\src\go.opencensus.io
|
||||||
|
|
||||||
|
environment:
|
||||||
|
GOPATH: 'c:\gopath'
|
||||||
|
GOVERSION: '1.11'
|
||||||
|
GO111MODULE: 'on'
|
||||||
|
CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
|
||||||
|
|
||||||
|
install:
|
||||||
|
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
|
||||||
|
- choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11
|
||||||
|
- go version
|
||||||
|
- go env
|
||||||
|
|
||||||
|
build: false
|
||||||
|
deploy: false
|
||||||
|
|
||||||
|
test_script:
|
||||||
|
- cd %APPVEYOR_BUILD_FOLDER%
|
||||||
|
- go build -v .\...
|
||||||
|
- go test -v .\... # No -race because cgo is disabled
|
|
@ -0,0 +1,12 @@
|
||||||
|
module go.opencensus.io
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/golang/protobuf v1.3.1
|
||||||
|
github.com/google/go-cmp v0.3.0
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
|
||||||
|
golang.org/x/text v0.3.2 // indirect
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
|
||||||
|
google.golang.org/grpc v1.20.1
|
||||||
|
)
|
|
@ -0,0 +1,61 @@
|
||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI=
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||||
|
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||||
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
@ -0,0 +1,37 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opencensus.io/internal"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
opencensus "go.opencensus.io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UserAgent is the user agent to be added to the outgoing
|
||||||
|
// requests from the exporters.
|
||||||
|
var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
|
||||||
|
|
||||||
|
// MonotonicEndTime returns the end time at present
|
||||||
|
// but offset from start, monotonically.
|
||||||
|
//
|
||||||
|
// The monotonic clock is used in subtractions hence
|
||||||
|
// the duration since start added back to start gives
|
||||||
|
// end as a monotonic time.
|
||||||
|
// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
|
||||||
|
func MonotonicEndTime(start time.Time) time.Time {
|
||||||
|
return start.Add(time.Now().Sub(start))
|
||||||
|
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
const labelKeySizeLimit = 100
|
||||||
|
|
||||||
|
// Sanitize returns a string that is truncated to 100 characters if it's too
// long, and replaces non-alphanumeric characters with underscores.
|
||||||
|
func Sanitize(s string) string {
|
||||||
|
if len(s) == 0 {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
if len(s) > labelKeySizeLimit {
|
||||||
|
s = s[:labelKeySizeLimit]
|
||||||
|
}
|
||||||
|
s = strings.Map(sanitizeRune, s)
|
||||||
|
if unicode.IsDigit(rune(s[0])) {
|
||||||
|
s = "key_" + s
|
||||||
|
}
|
||||||
|
if s[0] == '_' {
|
||||||
|
s = "key" + s
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// converts anything that is not a letter or digit to an underscore
|
||||||
|
func sanitizeRune(r rune) rune {
|
||||||
|
if unicode.IsLetter(r) || unicode.IsDigit(r) {
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
// Everything else turns into an underscore
|
||||||
|
return '_'
|
||||||
|
}
|
|
@ -0,0 +1,75 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Package tagencoding contains the tag encoding
|
||||||
|
// used internally by the stats collector.
|
||||||
|
package tagencoding // import "go.opencensus.io/internal/tagencoding"
|
||||||
|
|
||||||
|
// Values represents the encoded buffer for the values.
|
||||||
|
type Values struct {
|
||||||
|
Buffer []byte
|
||||||
|
WriteIndex int
|
||||||
|
ReadIndex int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vb *Values) growIfRequired(expected int) {
|
||||||
|
if len(vb.Buffer)-vb.WriteIndex < expected {
|
||||||
|
tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
|
||||||
|
copy(tmp, vb.Buffer)
|
||||||
|
vb.Buffer = tmp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteValue is the helper method to encode Values from map[Key][]byte.
|
||||||
|
func (vb *Values) WriteValue(v []byte) {
|
||||||
|
length := len(v) & 0xff
|
||||||
|
vb.growIfRequired(1 + length)
|
||||||
|
|
||||||
|
// writing length of v
|
||||||
|
vb.Buffer[vb.WriteIndex] = byte(length)
|
||||||
|
vb.WriteIndex++
|
||||||
|
|
||||||
|
if length == 0 {
|
||||||
|
// No value was encoded for this key
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// writing v
|
||||||
|
copy(vb.Buffer[vb.WriteIndex:], v[:length])
|
||||||
|
vb.WriteIndex += length
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadValue is the helper method to decode Values to a map[Key][]byte.
|
||||||
|
func (vb *Values) ReadValue() []byte {
|
||||||
|
// read length of v
|
||||||
|
length := int(vb.Buffer[vb.ReadIndex])
|
||||||
|
vb.ReadIndex++
|
||||||
|
if length == 0 {
|
||||||
|
// No value was encoded for this key
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// read value of v
|
||||||
|
v := make([]byte, length)
|
||||||
|
endIdx := vb.ReadIndex + length
|
||||||
|
copy(v, vb.Buffer[vb.ReadIndex:endIdx])
|
||||||
|
vb.ReadIndex = endIdx
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns a reference to already written bytes in the Buffer.
|
||||||
|
func (vb *Values) Bytes() []byte {
|
||||||
|
return vb.Buffer[:vb.WriteIndex]
|
||||||
|
}
|
|
@ -0,0 +1,53 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Trace allows internal access to some trace functionality.
|
||||||
|
// TODO(#412): remove this
|
||||||
|
var Trace interface{}
|
||||||
|
|
||||||
|
// LocalSpanStoreEnabled true if the local span store is enabled.
|
||||||
|
var LocalSpanStoreEnabled bool
|
||||||
|
|
||||||
|
// BucketConfiguration stores the number of samples to store for span buckets
|
||||||
|
// for successful and failed spans for a particular span name.
|
||||||
|
type BucketConfiguration struct {
|
||||||
|
Name string
|
||||||
|
MaxRequestsSucceeded int
|
||||||
|
MaxRequestsErrors int
|
||||||
|
}
|
||||||
|
|
||||||
|
// PerMethodSummary is a summary of the spans stored for a single span name.
|
||||||
|
type PerMethodSummary struct {
|
||||||
|
Active int
|
||||||
|
LatencyBuckets []LatencyBucketSummary
|
||||||
|
ErrorBuckets []ErrorBucketSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
// LatencyBucketSummary is a summary of a latency bucket.
|
||||||
|
type LatencyBucketSummary struct {
|
||||||
|
MinLatency, MaxLatency time.Duration
|
||||||
|
Size int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorBucketSummary is a summary of an error bucket.
|
||||||
|
type ErrorBucketSummary struct {
|
||||||
|
ErrorCode int32
|
||||||
|
Size int
|
||||||
|
}
|
|
@ -0,0 +1,19 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package metricdata contains the metrics data model.
|
||||||
|
//
|
||||||
|
// This is an EXPERIMENTAL package, and may change in arbitrary ways without
|
||||||
|
// notice.
|
||||||
|
package metricdata // import "go.opencensus.io/metric/metricdata"
|
|
@ -0,0 +1,38 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricdata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exemplars keys.
|
||||||
|
const (
|
||||||
|
AttachmentKeySpanContext = "SpanContext"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exemplar is an example data point associated with each bucket of a
|
||||||
|
// distribution type aggregation.
|
||||||
|
//
|
||||||
|
// Their purpose is to provide an example of the kind of thing
|
||||||
|
// (request, RPC, trace span, etc.) that resulted in that measurement.
|
||||||
|
type Exemplar struct {
|
||||||
|
Value float64 // the value that was recorded
|
||||||
|
Timestamp time.Time // the time the value was recorded
|
||||||
|
Attachments Attachments // attachments (if any)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attachments is a map of extra values associated with a recorded data point.
|
||||||
|
type Attachments map[string]interface{}
|
|
@ -0,0 +1,35 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricdata
|
||||||
|
|
||||||
|
// LabelKey represents key of a label. It has optional
|
||||||
|
// description attribute.
|
||||||
|
type LabelKey struct {
|
||||||
|
Key string
|
||||||
|
Description string
|
||||||
|
}
|
||||||
|
|
||||||
|
// LabelValue represents the value of a label.
|
||||||
|
// The zero value represents a missing label value, which may be treated
|
||||||
|
// differently to an empty string value by some back ends.
|
||||||
|
type LabelValue struct {
|
||||||
|
Value string // string value of the label
|
||||||
|
Present bool // flag that indicates whether a value is present or not
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewLabelValue creates a new non-nil LabelValue that represents the given string.
|
||||||
|
func NewLabelValue(val string) LabelValue {
|
||||||
|
return LabelValue{Value: val, Present: true}
|
||||||
|
}
|
|
@ -0,0 +1,46 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricdata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Descriptor holds metadata about a metric.
|
||||||
|
type Descriptor struct {
|
||||||
|
Name string // full name of the metric
|
||||||
|
Description string // human-readable description
|
||||||
|
Unit Unit // units for the measure
|
||||||
|
Type Type // type of measure
|
||||||
|
LabelKeys []LabelKey // label keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metric represents a quantity measured against a resource with different
|
||||||
|
// label value combinations.
|
||||||
|
type Metric struct {
|
||||||
|
Descriptor Descriptor // metric descriptor
|
||||||
|
Resource *resource.Resource // resource against which this was measured
|
||||||
|
TimeSeries []*TimeSeries // one time series for each combination of label values
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeSeries is a sequence of points associated with a combination of label
|
||||||
|
// values.
|
||||||
|
type TimeSeries struct {
|
||||||
|
LabelValues []LabelValue // label values, same order as keys in the metric descriptor
|
||||||
|
Points []Point // points sequence
|
||||||
|
StartTime time.Time // time we started recording this time series
|
||||||
|
}
|
|
@ -0,0 +1,193 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricdata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Point is a single data point of a time series.
|
||||||
|
type Point struct {
|
||||||
|
// Time is the point in time that this point represents in a time series.
|
||||||
|
Time time.Time
|
||||||
|
// Value is the value of this point. Prefer using ReadValue to switching on
|
||||||
|
// the value type, since new value types might be added.
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate stringer -type ValueType
|
||||||
|
|
||||||
|
// NewFloat64Point creates a new Point holding a float64 value.
|
||||||
|
func NewFloat64Point(t time.Time, val float64) Point {
|
||||||
|
return Point{
|
||||||
|
Value: val,
|
||||||
|
Time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInt64Point creates a new Point holding an int64 value.
|
||||||
|
func NewInt64Point(t time.Time, val int64) Point {
|
||||||
|
return Point{
|
||||||
|
Value: val,
|
||||||
|
Time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDistributionPoint creates a new Point holding a Distribution value.
|
||||||
|
func NewDistributionPoint(t time.Time, val *Distribution) Point {
|
||||||
|
return Point{
|
||||||
|
Value: val,
|
||||||
|
Time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSummaryPoint creates a new Point holding a Summary value.
|
||||||
|
func NewSummaryPoint(t time.Time, val *Summary) Point {
|
||||||
|
return Point{
|
||||||
|
Value: val,
|
||||||
|
Time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueVisitor allows reading the value of a point.
|
||||||
|
type ValueVisitor interface {
|
||||||
|
VisitFloat64Value(float64)
|
||||||
|
VisitInt64Value(int64)
|
||||||
|
VisitDistributionValue(*Distribution)
|
||||||
|
VisitSummaryValue(*Summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadValue accepts a ValueVisitor and calls the appropriate method with the
|
||||||
|
// value of this point.
|
||||||
|
// Consumers of Point should use this in preference to switching on the type
|
||||||
|
// of the value directly, since new value types may be added.
|
||||||
|
func (p Point) ReadValue(vv ValueVisitor) {
|
||||||
|
switch v := p.Value.(type) {
|
||||||
|
case int64:
|
||||||
|
vv.VisitInt64Value(v)
|
||||||
|
case float64:
|
||||||
|
vv.VisitFloat64Value(v)
|
||||||
|
case *Distribution:
|
||||||
|
vv.VisitDistributionValue(v)
|
||||||
|
case *Summary:
|
||||||
|
vv.VisitSummaryValue(v)
|
||||||
|
default:
|
||||||
|
panic("unexpected value type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Distribution contains summary statistics for a population of values. It
|
||||||
|
// optionally contains a histogram representing the distribution of those
|
||||||
|
// values across a set of buckets.
|
||||||
|
type Distribution struct {
|
||||||
|
// Count is the number of values in the population. Must be non-negative. This value
|
||||||
|
// must equal the sum of the values in bucket_counts if a histogram is
|
||||||
|
// provided.
|
||||||
|
Count int64
|
||||||
|
// Sum is the sum of the values in the population. If count is zero then this field
|
||||||
|
// must be zero.
|
||||||
|
Sum float64
|
||||||
|
// SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
|
||||||
|
// population. For values x_i this is:
|
||||||
|
//
|
||||||
|
// Sum[i=1..n]((x_i - mean)^2)
|
||||||
|
//
|
||||||
|
// Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
|
||||||
|
// describes Welford's method for accumulating this sum in one pass.
|
||||||
|
//
|
||||||
|
// If count is zero then this field must be zero.
|
||||||
|
SumOfSquaredDeviation float64
|
||||||
|
// BucketOptions describes the bounds of the histogram buckets in this
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// A Distribution may optionally contain a histogram of the values in the
|
||||||
|
// population.
|
||||||
|
//
|
||||||
|
// If nil, there is no associated histogram.
|
||||||
|
BucketOptions *BucketOptions
|
||||||
|
// Buckets is omitted if the distribution does not have a histogram.
// If there is a histogram, then the sum of the values in the bucket counts
// must equal the value in the count field of the distribution.
|
||||||
|
Buckets []Bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketOptions describes the bounds of the histogram buckets in this
|
||||||
|
// distribution.
|
||||||
|
type BucketOptions struct {
|
||||||
|
// Bounds specifies a set of bucket upper bounds.
|
||||||
|
// This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
|
||||||
|
// index i are:
|
||||||
|
//
|
||||||
|
// [0, Bounds[i]) for i == 0
|
||||||
|
// [Bounds[i-1], Bounds[i]) for 0 < i < N-1
|
||||||
|
// [Bounds[i-1], +infinity) for i == N-1
|
||||||
|
Bounds []float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bucket represents a single bucket (value range) in a distribution.
|
||||||
|
type Bucket struct {
|
||||||
|
// Count is the number of values in each bucket of the histogram, as described in
|
||||||
|
// bucket_bounds.
|
||||||
|
Count int64
|
||||||
|
// Exemplar associated with this bucket (if any).
|
||||||
|
Exemplar *Exemplar
|
||||||
|
}
|
||||||
|
|
||||||
|
// Summary is a representation of percentiles.
|
||||||
|
type Summary struct {
|
||||||
|
// Count is the cumulative count (if available).
|
||||||
|
Count int64
|
||||||
|
// Sum is the cumulative sum of values (if available).
|
||||||
|
Sum float64
|
||||||
|
// HasCountAndSum is true if Count and Sum are available.
|
||||||
|
HasCountAndSum bool
|
||||||
|
// Snapshot represents percentiles calculated over an arbitrary time window.
|
||||||
|
// The values in this struct can be reset at arbitrary unknown times, with
|
||||||
|
// the requirement that all of them are reset at the same time.
|
||||||
|
Snapshot Snapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot represents percentiles over an arbitrary time.
|
||||||
|
// The values in this struct can be reset at arbitrary unknown times, with
|
||||||
|
// the requirement that all of them are reset at the same time.
|
||||||
|
type Snapshot struct {
|
||||||
|
// Count is the number of values in the snapshot. Optional since some systems don't
|
||||||
|
// expose this. Set to 0 if not available.
|
||||||
|
Count int64
|
||||||
|
// Sum is the sum of values in the snapshot. Optional since some systems don't
|
||||||
|
// expose this. If count is 0 then this field must be zero.
|
||||||
|
Sum float64
|
||||||
|
// Percentiles is a map from percentile (range (0-100.0]) to the value of
|
||||||
|
// the percentile.
|
||||||
|
Percentiles map[float64]float64
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate stringer -type Type
|
||||||
|
|
||||||
|
// Type is the overall type of metric, including its value type and whether it
|
||||||
|
// represents a cumulative total (since the start time) or if it represents a
|
||||||
|
// gauge value.
|
||||||
|
type Type int
|
||||||
|
|
||||||
|
// Metric types.
|
||||||
|
const (
|
||||||
|
TypeGaugeInt64 Type = iota
|
||||||
|
TypeGaugeFloat64
|
||||||
|
TypeGaugeDistribution
|
||||||
|
TypeCumulativeInt64
|
||||||
|
TypeCumulativeFloat64
|
||||||
|
TypeCumulativeDistribution
|
||||||
|
TypeSummary
|
||||||
|
)
|
|
@ -0,0 +1,16 @@
|
||||||
|
// Code generated by "stringer -type Type"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package metricdata
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
|
||||||
|
|
||||||
|
var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
|
||||||
|
|
||||||
|
func (i Type) String() string {
|
||||||
|
if i < 0 || i >= Type(len(_Type_index)-1) {
|
||||||
|
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
return _Type_name[_Type_index[i]:_Type_index[i+1]]
|
||||||
|
}
|
|
@ -0,0 +1,27 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricdata
|
||||||
|
|
||||||
|
// Unit is a string encoded according to the case-sensitive abbreviations from the
|
||||||
|
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
|
||||||
|
type Unit string
|
||||||
|
|
||||||
|
// Predefined units. To record against a unit not represented here, create your
|
||||||
|
// own Unit type constant from a string.
|
||||||
|
const (
|
||||||
|
UnitDimensionless Unit = "1"
|
||||||
|
UnitBytes Unit = "By"
|
||||||
|
UnitMilliseconds Unit = "ms"
|
||||||
|
)
|
|
@ -0,0 +1,78 @@
|
||||||
|
// Copyright 2019, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricproducer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Manager maintains a list of active producers. Producers can register
|
||||||
|
// with the manager to allow readers to read all metrics provided by them.
|
||||||
|
// Readers can retrieve all producers registered with the manager,
|
||||||
|
// read metrics from the producers and export them.
|
||||||
|
type Manager struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
producers map[Producer]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var prodMgr *Manager
|
||||||
|
var once sync.Once
|
||||||
|
|
||||||
|
// GlobalManager is a single instance of producer manager
|
||||||
|
// that is used by all producers and all readers.
|
||||||
|
func GlobalManager() *Manager {
|
||||||
|
once.Do(func() {
|
||||||
|
prodMgr = &Manager{}
|
||||||
|
prodMgr.producers = make(map[Producer]struct{})
|
||||||
|
})
|
||||||
|
return prodMgr
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddProducer adds the producer to the Manager if it is not already present.
|
||||||
|
func (pm *Manager) AddProducer(producer Producer) {
|
||||||
|
if producer == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pm.mu.Lock()
|
||||||
|
defer pm.mu.Unlock()
|
||||||
|
pm.producers[producer] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteProducer deletes the producer from the Manager if it is present.
|
||||||
|
func (pm *Manager) DeleteProducer(producer Producer) {
|
||||||
|
if producer == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pm.mu.Lock()
|
||||||
|
defer pm.mu.Unlock()
|
||||||
|
delete(pm.producers, producer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAll returns a slice of all producers currently registered with
// the Manager. For each call it generates a new slice. The slice
// should not be cached as registration may change at any time. It is
// typically called periodically by exporters to read metrics from
// the producers.
|
||||||
|
func (pm *Manager) GetAll() []Producer {
|
||||||
|
pm.mu.Lock()
|
||||||
|
defer pm.mu.Unlock()
|
||||||
|
producers := make([]Producer, len(pm.producers))
|
||||||
|
i := 0
|
||||||
|
for producer := range pm.producers {
|
||||||
|
producers[i] = producer
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return producers
|
||||||
|
}
|
|
@ -0,0 +1,28 @@
|
||||||
|
// Copyright 2019, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package metricproducer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opencensus.io/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Producer is a source of metrics.
|
||||||
|
type Producer interface {
|
||||||
|
// Read should return the current values of all metrics supported by this
|
||||||
|
// metric provider.
|
||||||
|
// The returned metrics should be unique for each combination of name and
|
||||||
|
// resource.
|
||||||
|
Read() []*metricdata.Metric
|
||||||
|
}
|
|
@ -0,0 +1,21 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package opencensus contains Go support for OpenCensus.
|
||||||
|
package opencensus // import "go.opencensus.io"
|
||||||
|
|
||||||
|
// Version is the current release version of OpenCensus in use.
|
||||||
|
func Version() string {
|
||||||
|
return "0.23.0"
|
||||||
|
}
|
|
@ -0,0 +1,117 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptrace"
|
||||||
|
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
"go.opencensus.io/trace/propagation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Transport is an http.RoundTripper that instruments all outgoing requests with
|
||||||
|
// OpenCensus stats and tracing.
|
||||||
|
//
|
||||||
|
// The zero value is intended to be a useful default, but for
|
||||||
|
// now it's recommended that you explicitly set Propagation, since the default
|
||||||
|
// for this may change.
|
||||||
|
type Transport struct {
|
||||||
|
// Base may be set to wrap another http.RoundTripper that does the actual
|
||||||
|
// requests. By default http.DefaultTransport is used.
|
||||||
|
//
|
||||||
|
// If the base HTTP round tripper implements CancelRequest,
|
||||||
|
// the returned round tripper will be cancelable.
|
||||||
|
Base http.RoundTripper
|
||||||
|
|
||||||
|
// Propagation defines how traces are propagated. If unspecified, a default
|
||||||
|
// (currently B3 format) will be used.
|
||||||
|
Propagation propagation.HTTPFormat
|
||||||
|
|
||||||
|
// StartOptions are applied to the span started by this Transport around each
|
||||||
|
// request.
|
||||||
|
//
|
||||||
|
// StartOptions.SpanKind will always be set to trace.SpanKindClient
|
||||||
|
// for spans started by this transport.
|
||||||
|
StartOptions trace.StartOptions
|
||||||
|
|
||||||
|
// GetStartOptions allows setting start options per request. If set,
|
||||||
|
// StartOptions is going to be ignored.
|
||||||
|
GetStartOptions func(*http.Request) trace.StartOptions
|
||||||
|
|
||||||
|
// FormatSpanName holds the function to use for generating the span name
|
||||||
|
// from the information found in the outgoing HTTP Request. By default the
|
||||||
|
// name equals the URL Path.
|
||||||
|
FormatSpanName func(*http.Request) string
|
||||||
|
|
||||||
|
// NewClientTrace may be set to a function allowing the current *trace.Span
|
||||||
|
// to be annotated with HTTP request event information emitted by the
|
||||||
|
// httptrace package.
|
||||||
|
NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
|
||||||
|
|
||||||
|
// TODO: Implement tag propagation for HTTP.
|
||||||
|
}
|
||||||
|
|
||||||
|
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
|
||||||
|
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
rt := t.base()
|
||||||
|
if isHealthEndpoint(req.URL.Path) {
|
||||||
|
return rt.RoundTrip(req)
|
||||||
|
}
|
||||||
|
// TODO: remove excessive nesting of http.RoundTrippers here.
|
||||||
|
format := t.Propagation
|
||||||
|
if format == nil {
|
||||||
|
format = defaultFormat
|
||||||
|
}
|
||||||
|
spanNameFormatter := t.FormatSpanName
|
||||||
|
if spanNameFormatter == nil {
|
||||||
|
spanNameFormatter = spanNameFromURL
|
||||||
|
}
|
||||||
|
|
||||||
|
startOpts := t.StartOptions
|
||||||
|
if t.GetStartOptions != nil {
|
||||||
|
startOpts = t.GetStartOptions(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
rt = &traceTransport{
|
||||||
|
base: rt,
|
||||||
|
format: format,
|
||||||
|
startOptions: trace.StartOptions{
|
||||||
|
Sampler: startOpts.Sampler,
|
||||||
|
SpanKind: trace.SpanKindClient,
|
||||||
|
},
|
||||||
|
formatSpanName: spanNameFormatter,
|
||||||
|
newClientTrace: t.NewClientTrace,
|
||||||
|
}
|
||||||
|
rt = statsTransport{base: rt}
|
||||||
|
return rt.RoundTrip(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Transport) base() http.RoundTripper {
|
||||||
|
if t.Base != nil {
|
||||||
|
return t.Base
|
||||||
|
}
|
||||||
|
return http.DefaultTransport
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelRequest cancels an in-flight request by closing its connection.
|
||||||
|
func (t *Transport) CancelRequest(req *http.Request) {
|
||||||
|
type canceler interface {
|
||||||
|
CancelRequest(*http.Request)
|
||||||
|
}
|
||||||
|
if cr, ok := t.base().(canceler); ok {
|
||||||
|
cr.CancelRequest(req)
|
||||||
|
}
|
||||||
|
}
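A minimal client-side sketch of the Transport above: leave Base unset so http.DefaultTransport performs the actual request, and set Propagation explicitly as the field comment recommends. The target URL is a placeholder.

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Base is nil, so http.DefaultTransport performs the actual request.
			Propagation: &b3.HTTPFormat{},
		},
	}

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		return
	}
	// The client span and stats are finalized when the body reaches EOF or is closed.
	defer resp.Body.Close()
}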
|
|
@ -0,0 +1,143 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/stats"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
|
||||||
|
type statsTransport struct {
|
||||||
|
base http.RoundTripper
|
||||||
|
}
|
||||||
|
|
||||||
|
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
|
||||||
|
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
ctx, _ := tag.New(req.Context(),
|
||||||
|
tag.Upsert(KeyClientHost, req.Host),
|
||||||
|
tag.Upsert(Host, req.Host),
|
||||||
|
tag.Upsert(KeyClientPath, req.URL.Path),
|
||||||
|
tag.Upsert(Path, req.URL.Path),
|
||||||
|
tag.Upsert(KeyClientMethod, req.Method),
|
||||||
|
tag.Upsert(Method, req.Method))
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
track := &tracker{
|
||||||
|
start: time.Now(),
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
if req.Body == nil {
|
||||||
|
// TODO: Handle cases where ContentLength is not set.
|
||||||
|
track.reqSize = -1
|
||||||
|
} else if req.ContentLength > 0 {
|
||||||
|
track.reqSize = req.ContentLength
|
||||||
|
}
|
||||||
|
stats.Record(ctx, ClientRequestCount.M(1))
|
||||||
|
|
||||||
|
// Perform request.
|
||||||
|
resp, err := t.base.RoundTrip(req)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
track.statusCode = http.StatusInternalServerError
|
||||||
|
track.end()
|
||||||
|
} else {
|
||||||
|
track.statusCode = resp.StatusCode
|
||||||
|
if req.Method != "HEAD" {
|
||||||
|
track.respContentLength = resp.ContentLength
|
||||||
|
}
|
||||||
|
if resp.Body == nil {
|
||||||
|
track.end()
|
||||||
|
} else {
|
||||||
|
track.body = resp.Body
|
||||||
|
resp.Body = wrappedBody(track, resp.Body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelRequest cancels an in-flight request by closing its connection.
|
||||||
|
func (t statsTransport) CancelRequest(req *http.Request) {
|
||||||
|
type canceler interface {
|
||||||
|
CancelRequest(*http.Request)
|
||||||
|
}
|
||||||
|
if cr, ok := t.base.(canceler); ok {
|
||||||
|
cr.CancelRequest(req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type tracker struct {
|
||||||
|
ctx context.Context
|
||||||
|
respSize int64
|
||||||
|
respContentLength int64
|
||||||
|
reqSize int64
|
||||||
|
start time.Time
|
||||||
|
body io.ReadCloser
|
||||||
|
statusCode int
|
||||||
|
endOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ io.ReadCloser = (*tracker)(nil)
|
||||||
|
|
||||||
|
func (t *tracker) end() {
|
||||||
|
t.endOnce.Do(func() {
|
||||||
|
latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
|
||||||
|
respSize := t.respSize
|
||||||
|
if t.respSize == 0 && t.respContentLength > 0 {
|
||||||
|
respSize = t.respContentLength
|
||||||
|
}
|
||||||
|
m := []stats.Measurement{
|
||||||
|
ClientSentBytes.M(t.reqSize),
|
||||||
|
ClientReceivedBytes.M(respSize),
|
||||||
|
ClientRoundtripLatency.M(latencyMs),
|
||||||
|
ClientLatency.M(latencyMs),
|
||||||
|
ClientResponseBytes.M(t.respSize),
|
||||||
|
}
|
||||||
|
if t.reqSize >= 0 {
|
||||||
|
m = append(m, ClientRequestBytes.M(t.reqSize))
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.RecordWithTags(t.ctx, []tag.Mutator{
|
||||||
|
tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)),
|
||||||
|
tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)),
|
||||||
|
}, m...)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tracker) Read(b []byte) (int, error) {
|
||||||
|
n, err := t.body.Read(b)
|
||||||
|
t.respSize += int64(n)
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
return n, nil
|
||||||
|
case io.EOF:
|
||||||
|
t.end()
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tracker) Close() error {
|
||||||
|
// Invoking end on Close helps catch the cases
|
||||||
|
// in which a read returned a non-nil, non-EOF error and
|
||||||
|
// the measurements were therefore never recorded.
|
||||||
|
t.end()
|
||||||
|
return t.body.Close()
|
||||||
|
}
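The tracker above follows a general pattern: wrap a response body so that bookkeeping runs exactly once, either when the reader hits io.EOF or when the body is closed (which also covers reads that failed with another error). A stripped-down sketch of that pattern, independent of OpenCensus:

package bodytrack

import (
	"io"
	"sync"
)

// onDoneReader invokes done exactly once, at io.EOF or on Close, whichever happens first.
type onDoneReader struct {
	rc   io.ReadCloser
	once sync.Once
	done func()
}

func (r *onDoneReader) Read(p []byte) (int, error) {
	n, err := r.rc.Read(p)
	if err == io.EOF {
		r.once.Do(r.done)
	}
	return n, err
}

func (r *onDoneReader) Close() error {
	// Close also covers reads that returned a non-EOF error before done ran.
	r.once.Do(r.done)
	return r.rc.Close()
}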
|
|
@ -0,0 +1,19 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package ochttp provides OpenCensus instrumentation for net/http package.
|
||||||
|
//
|
||||||
|
// For server instrumentation, see Handler. For client-side instrumentation,
|
||||||
|
// see Transport.
|
||||||
|
package ochttp // import "go.opencensus.io/plugin/ochttp"
|
|
@ -0,0 +1,123 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package b3 contains a propagation.HTTPFormat implementation
|
||||||
|
// for B3 propagation. See https://github.com/openzipkin/b3-propagation
|
||||||
|
// for more details.
|
||||||
|
package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
"go.opencensus.io/trace/propagation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// B3 headers that OpenCensus understands.
|
||||||
|
const (
|
||||||
|
TraceIDHeader = "X-B3-TraceId"
|
||||||
|
SpanIDHeader = "X-B3-SpanId"
|
||||||
|
SampledHeader = "X-B3-Sampled"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPFormat implements propagation.HTTPFormat to propagate
|
||||||
|
// traces in HTTP headers in B3 propagation format.
|
||||||
|
// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
|
||||||
|
// because there are additional fields not represented in the
|
||||||
|
// OpenCensus span context. Spans created from the incoming
|
||||||
|
// header will be the direct children of the client-side span.
|
||||||
|
// Similarly, the receiver of outgoing spans should use the client-side
|
||||||
|
// span created by OpenCensus as the parent.
|
||||||
|
type HTTPFormat struct{}
|
||||||
|
|
||||||
|
var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
|
||||||
|
|
||||||
|
// SpanContextFromRequest extracts a B3 span context from incoming requests.
|
||||||
|
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
|
||||||
|
tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
|
||||||
|
if !ok {
|
||||||
|
return trace.SpanContext{}, false
|
||||||
|
}
|
||||||
|
sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
|
||||||
|
if !ok {
|
||||||
|
return trace.SpanContext{}, false
|
||||||
|
}
|
||||||
|
sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
|
||||||
|
return trace.SpanContext{
|
||||||
|
TraceID: tid,
|
||||||
|
SpanID: sid,
|
||||||
|
TraceOptions: sampled,
|
||||||
|
}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseTraceID parses the value of the X-B3-TraceId header.
|
||||||
|
func ParseTraceID(tid string) (trace.TraceID, bool) {
|
||||||
|
if tid == "" {
|
||||||
|
return trace.TraceID{}, false
|
||||||
|
}
|
||||||
|
b, err := hex.DecodeString(tid)
|
||||||
|
if err != nil {
|
||||||
|
return trace.TraceID{}, false
|
||||||
|
}
|
||||||
|
var traceID trace.TraceID
|
||||||
|
if len(b) <= 8 {
|
||||||
|
// The lower 64-bits.
|
||||||
|
start := 8 + (8 - len(b))
|
||||||
|
copy(traceID[start:], b)
|
||||||
|
} else {
|
||||||
|
start := 16 - len(b)
|
||||||
|
copy(traceID[start:], b)
|
||||||
|
}
|
||||||
|
|
||||||
|
return traceID, true
|
||||||
|
}
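A small worked example of the padding above, using hypothetical trace IDs: a 16 hex character (64-bit) B3 trace ID is right-aligned into the lower 8 bytes of the 16-byte trace.TraceID, while a 32 hex character ID fills it completely.

package main

import "go.opencensus.io/plugin/ochttp/propagation/b3"

func main() {
	// 16 hex characters decode to 8 bytes: they land in positions [8:16]
	// of the TraceID, and positions [0:8] remain zero.
	short, ok := b3.ParseTraceID("00f067aa0ba902b7")

	// 32 hex characters decode to 16 bytes and are used as-is.
	full, ok2 := b3.ParseTraceID("463ac35c9f6413ad48485a3953bb6124")

	_, _, _, _ = short, ok, full, ok2 // illustration only
}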
|
||||||
|
|
||||||
|
// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
|
||||||
|
func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
|
||||||
|
if sid == "" {
|
||||||
|
return trace.SpanID{}, false
|
||||||
|
}
|
||||||
|
b, err := hex.DecodeString(sid)
|
||||||
|
if err != nil {
|
||||||
|
return trace.SpanID{}, false
|
||||||
|
}
|
||||||
|
start := 8 - len(b)
|
||||||
|
copy(spanID[start:], b)
|
||||||
|
return spanID, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseSampled parses the value of the X-B3-Sampled header.
|
||||||
|
func ParseSampled(sampled string) (trace.TraceOptions, bool) {
|
||||||
|
switch sampled {
|
||||||
|
case "true", "1":
|
||||||
|
return trace.TraceOptions(1), true
|
||||||
|
default:
|
||||||
|
return trace.TraceOptions(0), false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanContextToRequest modifies the given request to include B3 headers.
|
||||||
|
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
|
||||||
|
req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
|
||||||
|
req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
|
||||||
|
|
||||||
|
var sampled string
|
||||||
|
if sc.IsSampled() {
|
||||||
|
sampled = "1"
|
||||||
|
} else {
|
||||||
|
sampled = "0"
|
||||||
|
}
|
||||||
|
req.Header.Set(SampledHeader, sampled)
|
||||||
|
}
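A sketch of using HTTPFormat directly, for example in custom middleware that is not built on ochttp.Transport or ochttp.Handler: extract the incoming B3 span context and copy it onto an outgoing request. The backend URL is a placeholder.

package middleware

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func relay(w http.ResponseWriter, r *http.Request) {
	var f b3.HTTPFormat

	// Reads X-B3-TraceId, X-B3-SpanId and X-B3-Sampled from the incoming request.
	sc, ok := f.SpanContextFromRequest(r)
	if !ok {
		http.Error(w, "missing B3 headers", http.StatusBadRequest)
		return
	}

	// Writes the same three headers onto the outgoing request.
	out, err := http.NewRequest("GET", "https://backend.example/work", nil) // placeholder URL
	if err != nil {
		return
	}
	f.SpanContextToRequest(sc, out)
}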
|
|
@ -0,0 +1,61 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetRoute sets the http_server_route tag to the given value.
|
||||||
|
// It's useful when an HTTP framework does not support the http.Handler interface
|
||||||
|
// and using WithRouteTag is not an option, but the framework provides a way to hook into the request flow.
|
||||||
|
func SetRoute(ctx context.Context, route string) {
|
||||||
|
if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
|
||||||
|
a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRouteTag returns an http.Handler that records stats with the
|
||||||
|
// http_server_route tag set to the given value.
|
||||||
|
func WithRouteTag(handler http.Handler, route string) http.Handler {
|
||||||
|
return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
|
||||||
|
addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
|
||||||
|
ctx, _ := tag.New(r.Context(), addRoute...)
|
||||||
|
r = r.WithContext(ctx)
|
||||||
|
handler.ServeHTTP(w, r)
|
||||||
|
return addRoute
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// taggedHandlerFunc is a http.Handler that returns tags describing the
|
||||||
|
// processing of the request. These tags will be recorded along with the
|
||||||
|
// measures in this package at the end of the request.
|
||||||
|
type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
|
||||||
|
|
||||||
|
func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
tags := h(w, r)
|
||||||
|
if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
|
||||||
|
a.t = append(a.t, tags...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type addedTagsKey struct{}
|
||||||
|
|
||||||
|
type addedTags struct {
|
||||||
|
t []tag.Mutator
|
||||||
|
}
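A server-side sketch tying the route helpers above together: handlers registered through WithRouteTag record stats with a low-cardinality http_server_route value instead of the raw URL path, and SetRoute is the fallback for frameworks where the route is only known deeper in the request flow. The mux pattern and handler are hypothetical.

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()

	// Requests under /users/ are recorded with http_server_route="/users/:id".
	mux.Handle("/users/", ochttp.WithRouteTag(http.HandlerFunc(userHandler), "/users/:id"))

	http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux})
}

// userHandler is a hypothetical application handler.
func userHandler(w http.ResponseWriter, r *http.Request) {
	// SetRoute is the alternative when the route is only determined here.
	ochttp.SetRoute(r.Context(), "/users/:id")
	w.WriteHeader(http.StatusOK)
}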
|
|
@ -0,0 +1,449 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/stats"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
"go.opencensus.io/trace/propagation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Handler is an http.Handler wrapper to instrument your HTTP server with
|
||||||
|
// OpenCensus. It supports both stats and tracing.
|
||||||
|
//
|
||||||
|
// Tracing
|
||||||
|
//
|
||||||
|
// This handler is aware of the incoming request's span, reading it from request
|
||||||
|
// headers as configured using the Propagation field.
|
||||||
|
// The extracted span can be accessed from the incoming request's
|
||||||
|
// context.
|
||||||
|
//
|
||||||
|
// span := trace.FromContext(r.Context())
|
||||||
|
//
|
||||||
|
// The server span will be automatically ended at the end of ServeHTTP.
|
||||||
|
type Handler struct {
|
||||||
|
// Propagation defines how traces are propagated. If unspecified,
|
||||||
|
// B3 propagation will be used.
|
||||||
|
Propagation propagation.HTTPFormat
|
||||||
|
|
||||||
|
// Handler is the handler used to handle the incoming request.
|
||||||
|
Handler http.Handler
|
||||||
|
|
||||||
|
// StartOptions are applied to the span started by this Handler around each
|
||||||
|
// request.
|
||||||
|
//
|
||||||
|
// StartOptions.SpanKind will always be set to trace.SpanKindServer
|
||||||
|
// for spans started by this Handler.
|
||||||
|
StartOptions trace.StartOptions
|
||||||
|
|
||||||
|
// GetStartOptions allows setting start options per request. If set,
|
||||||
|
// StartOptions is going to be ignored.
|
||||||
|
GetStartOptions func(*http.Request) trace.StartOptions
|
||||||
|
|
||||||
|
// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
|
||||||
|
// servers. If true, any trace metadata set on the incoming request will
|
||||||
|
// be added as a linked trace instead of being added as a parent of the
|
||||||
|
// current trace.
|
||||||
|
IsPublicEndpoint bool
|
||||||
|
|
||||||
|
// FormatSpanName holds the function to use for generating the span name
|
||||||
|
// from the information found in the incoming HTTP Request. By default the
|
||||||
|
// name equals the URL Path.
|
||||||
|
FormatSpanName func(*http.Request) string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var tags addedTags
|
||||||
|
r, traceEnd := h.startTrace(w, r)
|
||||||
|
defer traceEnd()
|
||||||
|
w, statsEnd := h.startStats(w, r)
|
||||||
|
defer statsEnd(&tags)
|
||||||
|
handler := h.Handler
|
||||||
|
if handler == nil {
|
||||||
|
handler = http.DefaultServeMux
|
||||||
|
}
|
||||||
|
r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
|
||||||
|
handler.ServeHTTP(w, r)
|
||||||
|
}
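A sketch of serving an application behind the Handler above and reading the server span inside a request, as the type comment describes; IsPublicEndpoint is set to show the link-instead-of-parent behavior for internet-facing servers.

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The span started by startTrace is available on the request context.
		span := trace.FromContext(r.Context())
		span.Annotate(nil, "handling request")
		w.Write([]byte("ok"))
	})

	h := &ochttp.Handler{
		Handler: app,
		// Treat incoming trace headers as a link rather than as the parent
		// of the server span, since public clients are untrusted.
		IsPublicEndpoint: true,
	}
	http.ListenAndServe(":8080", h)
}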
|
||||||
|
|
||||||
|
func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
|
||||||
|
if isHealthEndpoint(r.URL.Path) {
|
||||||
|
return r, func() {}
|
||||||
|
}
|
||||||
|
var name string
|
||||||
|
if h.FormatSpanName == nil {
|
||||||
|
name = spanNameFromURL(r)
|
||||||
|
} else {
|
||||||
|
name = h.FormatSpanName(r)
|
||||||
|
}
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
|
startOpts := h.StartOptions
|
||||||
|
if h.GetStartOptions != nil {
|
||||||
|
startOpts = h.GetStartOptions(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
var span *trace.Span
|
||||||
|
sc, ok := h.extractSpanContext(r)
|
||||||
|
if ok && !h.IsPublicEndpoint {
|
||||||
|
ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
|
||||||
|
trace.WithSampler(startOpts.Sampler),
|
||||||
|
trace.WithSpanKind(trace.SpanKindServer))
|
||||||
|
} else {
|
||||||
|
ctx, span = trace.StartSpan(ctx, name,
|
||||||
|
trace.WithSampler(startOpts.Sampler),
|
||||||
|
trace.WithSpanKind(trace.SpanKindServer),
|
||||||
|
)
|
||||||
|
if ok {
|
||||||
|
span.AddLink(trace.Link{
|
||||||
|
TraceID: sc.TraceID,
|
||||||
|
SpanID: sc.SpanID,
|
||||||
|
Type: trace.LinkTypeParent,
|
||||||
|
Attributes: nil,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
span.AddAttributes(requestAttrs(r)...)
|
||||||
|
if r.Body == nil {
|
||||||
|
// TODO: Handle cases where ContentLength is not set.
|
||||||
|
} else if r.ContentLength > 0 {
|
||||||
|
span.AddMessageReceiveEvent(0, /* TODO: messageID */
|
||||||
|
int64(r.ContentLength), -1)
|
||||||
|
}
|
||||||
|
return r.WithContext(ctx), span.End
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
|
||||||
|
if h.Propagation == nil {
|
||||||
|
return defaultFormat.SpanContextFromRequest(r)
|
||||||
|
}
|
||||||
|
return h.Propagation.SpanContextFromRequest(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
|
||||||
|
ctx, _ := tag.New(r.Context(),
|
||||||
|
tag.Upsert(Host, r.Host),
|
||||||
|
tag.Upsert(Path, r.URL.Path),
|
||||||
|
tag.Upsert(Method, r.Method))
|
||||||
|
track := &trackingResponseWriter{
|
||||||
|
start: time.Now(),
|
||||||
|
ctx: ctx,
|
||||||
|
writer: w,
|
||||||
|
}
|
||||||
|
if r.Body == nil {
|
||||||
|
// TODO: Handle cases where ContentLength is not set.
|
||||||
|
track.reqSize = -1
|
||||||
|
} else if r.ContentLength > 0 {
|
||||||
|
track.reqSize = r.ContentLength
|
||||||
|
}
|
||||||
|
stats.Record(ctx, ServerRequestCount.M(1))
|
||||||
|
return track.wrappedResponseWriter(), track.end
|
||||||
|
}
|
||||||
|
|
||||||
|
type trackingResponseWriter struct {
|
||||||
|
ctx context.Context
|
||||||
|
reqSize int64
|
||||||
|
respSize int64
|
||||||
|
start time.Time
|
||||||
|
statusCode int
|
||||||
|
statusLine string
|
||||||
|
endOnce sync.Once
|
||||||
|
writer http.ResponseWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile-time assertion that trackingResponseWriter implements http.ResponseWriter.
|
||||||
|
var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
|
||||||
|
|
||||||
|
var logTagsErrorOnce sync.Once
|
||||||
|
|
||||||
|
func (t *trackingResponseWriter) end(tags *addedTags) {
|
||||||
|
t.endOnce.Do(func() {
|
||||||
|
if t.statusCode == 0 {
|
||||||
|
t.statusCode = 200
|
||||||
|
}
|
||||||
|
|
||||||
|
span := trace.FromContext(t.ctx)
|
||||||
|
span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
|
||||||
|
span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
|
||||||
|
|
||||||
|
m := []stats.Measurement{
|
||||||
|
ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
|
||||||
|
ServerResponseBytes.M(t.respSize),
|
||||||
|
}
|
||||||
|
if t.reqSize >= 0 {
|
||||||
|
m = append(m, ServerRequestBytes.M(t.reqSize))
|
||||||
|
}
|
||||||
|
allTags := make([]tag.Mutator, len(tags.t)+1)
|
||||||
|
allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
|
||||||
|
copy(allTags[1:], tags.t)
|
||||||
|
stats.RecordWithTags(t.ctx, allTags, m...)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trackingResponseWriter) Header() http.Header {
|
||||||
|
return t.writer.Header()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trackingResponseWriter) Write(data []byte) (int, error) {
|
||||||
|
n, err := t.writer.Write(data)
|
||||||
|
t.respSize += int64(n)
|
||||||
|
// Add message event for request bytes sent.
|
||||||
|
span := trace.FromContext(t.ctx)
|
||||||
|
span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trackingResponseWriter) WriteHeader(statusCode int) {
|
||||||
|
t.writer.WriteHeader(statusCode)
|
||||||
|
t.statusCode = statusCode
|
||||||
|
t.statusLine = http.StatusText(t.statusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrappedResponseWriter returns a wrapped version of the original
|
||||||
|
// ResponseWriter and only implements the same combination of additional
|
||||||
|
// interfaces as the original.
|
||||||
|
// This implementation is based on https://github.com/felixge/httpsnoop.
|
||||||
|
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
|
||||||
|
var (
|
||||||
|
hj, i0 = t.writer.(http.Hijacker)
|
||||||
|
cn, i1 = t.writer.(http.CloseNotifier)
|
||||||
|
pu, i2 = t.writer.(http.Pusher)
|
||||||
|
fl, i3 = t.writer.(http.Flusher)
|
||||||
|
rf, i4 = t.writer.(io.ReaderFrom)
|
||||||
|
)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case !i0 && !i1 && !i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
}{t}
|
||||||
|
case !i0 && !i1 && !i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, rf}
|
||||||
|
case !i0 && !i1 && !i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Flusher
|
||||||
|
}{t, fl}
|
||||||
|
case !i0 && !i1 && !i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, fl, rf}
|
||||||
|
case !i0 && !i1 && i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Pusher
|
||||||
|
}{t, pu}
|
||||||
|
case !i0 && !i1 && i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, pu, rf}
|
||||||
|
case !i0 && !i1 && i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{t, pu, fl}
|
||||||
|
case !i0 && !i1 && i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, pu, fl, rf}
|
||||||
|
case !i0 && i1 && !i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
}{t, cn}
|
||||||
|
case !i0 && i1 && !i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, cn, rf}
|
||||||
|
case !i0 && i1 && !i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Flusher
|
||||||
|
}{t, cn, fl}
|
||||||
|
case !i0 && i1 && !i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, cn, fl, rf}
|
||||||
|
case !i0 && i1 && i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
}{t, cn, pu}
|
||||||
|
case !i0 && i1 && i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, cn, pu, rf}
|
||||||
|
case !i0 && i1 && i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{t, cn, pu, fl}
|
||||||
|
case !i0 && i1 && i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, cn, pu, fl, rf}
|
||||||
|
case i0 && !i1 && !i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
}{t, hj}
|
||||||
|
case i0 && !i1 && !i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, rf}
|
||||||
|
case i0 && !i1 && !i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{t, hj, fl}
|
||||||
|
case i0 && !i1 && !i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, fl, rf}
|
||||||
|
case i0 && !i1 && i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.Pusher
|
||||||
|
}{t, hj, pu}
|
||||||
|
case i0 && !i1 && i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, pu, rf}
|
||||||
|
case i0 && !i1 && i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{t, hj, pu, fl}
|
||||||
|
case i0 && !i1 && i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, pu, fl, rf}
|
||||||
|
case i0 && i1 && !i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{t, hj, cn}
|
||||||
|
case i0 && i1 && !i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, cn, rf}
|
||||||
|
case i0 && i1 && !i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Flusher
|
||||||
|
}{t, hj, cn, fl}
|
||||||
|
case i0 && i1 && !i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, cn, fl, rf}
|
||||||
|
case i0 && i1 && i2 && !i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
}{t, hj, cn, pu}
|
||||||
|
case i0 && i1 && i2 && !i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, cn, pu, rf}
|
||||||
|
case i0 && i1 && i2 && i3 && !i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{t, hj, cn, pu, fl}
|
||||||
|
case i0 && i1 && i2 && i3 && i4:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{t, hj, cn, pu, fl, rf}
|
||||||
|
default:
|
||||||
|
return struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
}{t}
|
||||||
|
}
|
||||||
|
}
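The exhaustive switch above exists so the wrapper never hides optional interfaces of the underlying ResponseWriter. A short sketch of why that matters: streaming code type-asserts for http.Flusher, and with this wrapping the assertion succeeds exactly when it would have without instrumentation. streamHandler is a hypothetical handler served behind ochttp.Handler.

package app

import "net/http"

func streamHandler(w http.ResponseWriter, r *http.Request) {
	// w is the writer returned by wrappedResponseWriter; the wrapper re-exposes
	// the original writer's Flusher implementation, so this assertion holds
	// whenever the underlying ResponseWriter supports flushing.
	if fl, ok := w.(http.Flusher); ok {
		w.Write([]byte("chunk 1\n"))
		fl.Flush() // push the first chunk to the client immediately
	}
	w.Write([]byte("chunk 2\n"))
}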
|
169 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go (generated, vendored, new file)
|
@ -0,0 +1,169 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptrace"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
type spanAnnotator struct {
|
||||||
|
sp *trace.Span
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Remove NewSpanAnnotator at the next release.
|
||||||
|
|
||||||
|
// NewSpanAnnotator returns an httptrace.ClientTrace which annotates
|
||||||
|
// all emitted httptrace events on the provided Span.
|
||||||
|
// Deprecated: Use NewSpanAnnotatingClientTrace instead.
|
||||||
|
func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
|
||||||
|
return NewSpanAnnotatingClientTrace(r, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSpanAnnotatingClientTrace returns an httptrace.ClientTrace which annotates
|
||||||
|
// all emitted httptrace events on the provided Span.
|
||||||
|
func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
|
||||||
|
sa := spanAnnotator{sp: s}
|
||||||
|
|
||||||
|
return &httptrace.ClientTrace{
|
||||||
|
GetConn: sa.getConn,
|
||||||
|
GotConn: sa.gotConn,
|
||||||
|
PutIdleConn: sa.putIdleConn,
|
||||||
|
GotFirstResponseByte: sa.gotFirstResponseByte,
|
||||||
|
Got100Continue: sa.got100Continue,
|
||||||
|
DNSStart: sa.dnsStart,
|
||||||
|
DNSDone: sa.dnsDone,
|
||||||
|
ConnectStart: sa.connectStart,
|
||||||
|
ConnectDone: sa.connectDone,
|
||||||
|
TLSHandshakeStart: sa.tlsHandshakeStart,
|
||||||
|
TLSHandshakeDone: sa.tlsHandshakeDone,
|
||||||
|
WroteHeaders: sa.wroteHeaders,
|
||||||
|
Wait100Continue: sa.wait100Continue,
|
||||||
|
WroteRequest: sa.wroteRequest,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) getConn(hostPort string) {
|
||||||
|
attrs := []trace.Attribute{
|
||||||
|
trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "GetConn")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
|
||||||
|
attrs := []trace.Attribute{
|
||||||
|
trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
|
||||||
|
trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
|
||||||
|
}
|
||||||
|
if info.WasIdle {
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "GotConn")
|
||||||
|
}
|
||||||
|
|
||||||
|
// putIdleConn implements the httptrace.ClientTrace PutIdleConn hook.
|
||||||
|
func (s spanAnnotator) putIdleConn(err error) {
|
||||||
|
var attrs []trace.Attribute
|
||||||
|
if err != nil {
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "PutIdleConn")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) gotFirstResponseByte() {
|
||||||
|
s.sp.Annotate(nil, "GotFirstResponseByte")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) got100Continue() {
|
||||||
|
s.sp.Annotate(nil, "Got100Continue")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
|
||||||
|
attrs := []trace.Attribute{
|
||||||
|
trace.StringAttribute("httptrace.dns_start.host", info.Host),
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "DNSStart")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
|
||||||
|
var addrs []string
|
||||||
|
for _, addr := range info.Addrs {
|
||||||
|
addrs = append(addrs, addr.String())
|
||||||
|
}
|
||||||
|
attrs := []trace.Attribute{
|
||||||
|
trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
|
||||||
|
}
|
||||||
|
if info.Err != nil {
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "DNSDone")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) connectStart(network, addr string) {
|
||||||
|
attrs := []trace.Attribute{
|
||||||
|
trace.StringAttribute("httptrace.connect_start.network", network),
|
||||||
|
trace.StringAttribute("httptrace.connect_start.addr", addr),
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "ConnectStart")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) connectDone(network, addr string, err error) {
|
||||||
|
attrs := []trace.Attribute{
|
||||||
|
trace.StringAttribute("httptrace.connect_done.network", network),
|
||||||
|
trace.StringAttribute("httptrace.connect_done.addr", addr),
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute("httptrace.connect_done.error", err.Error()))
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "ConnectDone")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) tlsHandshakeStart() {
|
||||||
|
s.sp.Annotate(nil, "TLSHandshakeStart")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
|
||||||
|
var attrs []trace.Attribute
|
||||||
|
if err != nil {
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "TLSHandshakeDone")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) wroteHeaders() {
|
||||||
|
s.sp.Annotate(nil, "WroteHeaders")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) wait100Continue() {
|
||||||
|
s.sp.Annotate(nil, "Wait100Continue")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
|
||||||
|
var attrs []trace.Attribute
|
||||||
|
if info.Err != nil {
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
|
||||||
|
}
|
||||||
|
s.sp.Annotate(attrs, "WroteRequest")
|
||||||
|
}
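A sketch of enabling these annotations on a client: assign NewSpanAnnotatingClientTrace to Transport.NewClientTrace so connection, DNS, TLS and write events from net/http/httptrace are attached to the client span. The URL is a placeholder.

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Annotates the client span with GetConn, DNSStart/DNSDone,
			// TLSHandshakeStart/Done, WroteRequest and the other hooks above.
			NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
		},
	}
	_, _ = client.Get("https://example.com/") // placeholder URL; errors ignored in this sketch
}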
|
|
@ -0,0 +1,292 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opencensus.io/stats"
|
||||||
|
"go.opencensus.io/stats/view"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Deprecated: client HTTP measures.
|
||||||
|
var (
|
||||||
|
// Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect.
|
||||||
|
ClientRequestCount = stats.Int64(
|
||||||
|
"opencensus.io/http/client/request_count",
|
||||||
|
"Number of HTTP requests started",
|
||||||
|
stats.UnitDimensionless)
|
||||||
|
// Deprecated: Use ClientSentBytes.
|
||||||
|
ClientRequestBytes = stats.Int64(
|
||||||
|
"opencensus.io/http/client/request_bytes",
|
||||||
|
"HTTP request body size if set as ContentLength (uncompressed)",
|
||||||
|
stats.UnitBytes)
|
||||||
|
// Deprecated: Use ClientReceivedBytes.
|
||||||
|
ClientResponseBytes = stats.Int64(
|
||||||
|
"opencensus.io/http/client/response_bytes",
|
||||||
|
"HTTP response body size (uncompressed)",
|
||||||
|
stats.UnitBytes)
|
||||||
|
// Deprecated: Use ClientRoundtripLatency.
|
||||||
|
ClientLatency = stats.Float64(
|
||||||
|
"opencensus.io/http/client/latency",
|
||||||
|
"End-to-end latency",
|
||||||
|
stats.UnitMilliseconds)
|
||||||
|
)
|
||||||
|
|
||||||
|
// The following client HTTP measures are supported for use in custom views.
|
||||||
|
var (
|
||||||
|
ClientSentBytes = stats.Int64(
|
||||||
|
"opencensus.io/http/client/sent_bytes",
|
||||||
|
"Total bytes sent in request body (not including headers)",
|
||||||
|
stats.UnitBytes,
|
||||||
|
)
|
||||||
|
ClientReceivedBytes = stats.Int64(
|
||||||
|
"opencensus.io/http/client/received_bytes",
|
||||||
|
"Total bytes received in response bodies (not including headers but including error responses with bodies)",
|
||||||
|
stats.UnitBytes,
|
||||||
|
)
|
||||||
|
ClientRoundtripLatency = stats.Float64(
|
||||||
|
"opencensus.io/http/client/roundtrip_latency",
|
||||||
|
"Time between first byte of request headers sent to last byte of response received, or terminal error",
|
||||||
|
stats.UnitMilliseconds,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
// The following server HTTP measures are supported for use in custom views:
|
||||||
|
var (
|
||||||
|
ServerRequestCount = stats.Int64(
|
||||||
|
"opencensus.io/http/server/request_count",
|
||||||
|
"Number of HTTP requests started",
|
||||||
|
stats.UnitDimensionless)
|
||||||
|
ServerRequestBytes = stats.Int64(
|
||||||
|
"opencensus.io/http/server/request_bytes",
|
||||||
|
"HTTP request body size if set as ContentLength (uncompressed)",
|
||||||
|
stats.UnitBytes)
|
||||||
|
ServerResponseBytes = stats.Int64(
|
||||||
|
"opencensus.io/http/server/response_bytes",
|
||||||
|
"HTTP response body size (uncompressed)",
|
||||||
|
stats.UnitBytes)
|
||||||
|
ServerLatency = stats.Float64(
|
||||||
|
"opencensus.io/http/server/latency",
|
||||||
|
"End-to-end latency",
|
||||||
|
stats.UnitMilliseconds)
|
||||||
|
)
|
||||||
|
|
||||||
|
// The following tags are applied to stats recorded by this package. Host, Path
|
||||||
|
// and Method are applied to all measures. StatusCode is not applied to
|
||||||
|
// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
|
||||||
|
var (
|
||||||
|
// Host is the value of the HTTP Host header.
|
||||||
|
//
|
||||||
|
// The value of this tag can be controlled by the HTTP client, so you need
|
||||||
|
// to watch out for potentially generating high-cardinality labels in your
|
||||||
|
// metrics backend if you use this tag in views.
|
||||||
|
Host = tag.MustNewKey("http.host")
|
||||||
|
|
||||||
|
// StatusCode is the numeric HTTP response status code,
|
||||||
|
// or "error" if a transport error occurred and no status code was read.
|
||||||
|
StatusCode = tag.MustNewKey("http.status")
|
||||||
|
|
||||||
|
// Path is the URL path (not including query string) in the request.
|
||||||
|
//
|
||||||
|
// The value of this tag can be controlled by the HTTP client, so you need
|
||||||
|
// to watch out for potentially generating high-cardinality labels in your
|
||||||
|
// metrics backend if you use this tag in views.
|
||||||
|
Path = tag.MustNewKey("http.path")
|
||||||
|
|
||||||
|
// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
|
||||||
|
Method = tag.MustNewKey("http.method")
|
||||||
|
|
||||||
|
// KeyServerRoute is a low cardinality string representing the logical
|
||||||
|
// handler of the request. This is usually the pattern registered on a
|
||||||
|
// ServeMux (or similar string).
|
||||||
|
KeyServerRoute = tag.MustNewKey("http_server_route")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client tag keys.
|
||||||
|
var (
|
||||||
|
// KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.).
|
||||||
|
KeyClientMethod = tag.MustNewKey("http_client_method")
|
||||||
|
// KeyClientPath is the URL path (not including query string).
|
||||||
|
KeyClientPath = tag.MustNewKey("http_client_path")
|
||||||
|
// KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received.
|
||||||
|
KeyClientStatus = tag.MustNewKey("http_client_status")
|
||||||
|
// KeyClientHost is the value of the request Host header.
|
||||||
|
KeyClientHost = tag.MustNewKey("http_client_host")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Default distributions used by views in this package.
|
||||||
|
var (
|
||||||
|
DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
|
||||||
|
DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
|
||||||
|
)
|
||||||
|
|
||||||
|
// The following are convenience views for client measures provided by this package.
|
||||||
|
// You still need to register these views for data to actually be collected.
|
||||||
|
var (
|
||||||
|
ClientSentBytesDistribution = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/sent_bytes",
|
||||||
|
Measure: ClientSentBytes,
|
||||||
|
Aggregation: DefaultSizeDistribution,
|
||||||
|
Description: "Total bytes sent in request body (not including headers), by HTTP method and response status",
|
||||||
|
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
|
||||||
|
}
|
||||||
|
|
||||||
|
ClientReceivedBytesDistribution = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/received_bytes",
|
||||||
|
Measure: ClientReceivedBytes,
|
||||||
|
Aggregation: DefaultSizeDistribution,
|
||||||
|
Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status",
|
||||||
|
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
|
||||||
|
}
|
||||||
|
|
||||||
|
ClientRoundtripLatencyDistribution = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/roundtrip_latency",
|
||||||
|
Measure: ClientRoundtripLatency,
|
||||||
|
Aggregation: DefaultLatencyDistribution,
|
||||||
|
Description: "End-to-end latency, by HTTP method and response status",
|
||||||
|
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
|
||||||
|
}
|
||||||
|
|
||||||
|
ClientCompletedCount = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/completed_count",
|
||||||
|
Measure: ClientRoundtripLatency,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
Description: "Count of completed requests, by HTTP method and response status",
|
||||||
|
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Deprecated: Old client Views.
|
||||||
|
var (
|
||||||
|
// Deprecated: No direct replacement, but see ClientCompletedCount.
|
||||||
|
ClientRequestCountView = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/request_count",
|
||||||
|
Description: "Count of HTTP requests started",
|
||||||
|
Measure: ClientRequestCount,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ClientSentBytesDistribution.
|
||||||
|
ClientRequestBytesView = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/request_bytes",
|
||||||
|
Description: "Size distribution of HTTP request body",
|
||||||
|
Measure: ClientSentBytes,
|
||||||
|
Aggregation: DefaultSizeDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ClientReceivedBytesDistribution instead.
|
||||||
|
ClientResponseBytesView = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/response_bytes",
|
||||||
|
Description: "Size distribution of HTTP response body",
|
||||||
|
Measure: ClientReceivedBytes,
|
||||||
|
Aggregation: DefaultSizeDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ClientRoundtripLatencyDistribution instead.
|
||||||
|
ClientLatencyView = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/latency",
|
||||||
|
Description: "Latency distribution of HTTP requests",
|
||||||
|
Measure: ClientRoundtripLatency,
|
||||||
|
Aggregation: DefaultLatencyDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ClientCompletedCount instead.
|
||||||
|
ClientRequestCountByMethod = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/request_count_by_method",
|
||||||
|
Description: "Client request count by HTTP method",
|
||||||
|
TagKeys: []tag.Key{Method},
|
||||||
|
Measure: ClientSentBytes,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use ClientCompletedCount instead.
|
||||||
|
ClientResponseCountByStatusCode = &view.View{
|
||||||
|
Name: "opencensus.io/http/client/response_count_by_status_code",
|
||||||
|
Description: "Client response count by status code",
|
||||||
|
TagKeys: []tag.Key{StatusCode},
|
||||||
|
Measure: ClientRoundtripLatency,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// The following are convenience views for server measures provided by this package.
|
||||||
|
// You still need to register these views for data to actually be collected.
|
||||||
|
var (
|
||||||
|
ServerRequestCountView = &view.View{
|
||||||
|
Name: "opencensus.io/http/server/request_count",
|
||||||
|
Description: "Count of HTTP requests started",
|
||||||
|
Measure: ServerRequestCount,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerRequestBytesView = &view.View{
|
||||||
|
Name: "opencensus.io/http/server/request_bytes",
|
||||||
|
Description: "Size distribution of HTTP request body",
|
||||||
|
Measure: ServerRequestBytes,
|
||||||
|
Aggregation: DefaultSizeDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerResponseBytesView = &view.View{
|
||||||
|
Name: "opencensus.io/http/server/response_bytes",
|
||||||
|
Description: "Size distribution of HTTP response body",
|
||||||
|
Measure: ServerResponseBytes,
|
||||||
|
Aggregation: DefaultSizeDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerLatencyView = &view.View{
|
||||||
|
Name: "opencensus.io/http/server/latency",
|
||||||
|
Description: "Latency distribution of HTTP requests",
|
||||||
|
Measure: ServerLatency,
|
||||||
|
Aggregation: DefaultLatencyDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerRequestCountByMethod = &view.View{
|
||||||
|
Name: "opencensus.io/http/server/request_count_by_method",
|
||||||
|
Description: "Server request count by HTTP method",
|
||||||
|
TagKeys: []tag.Key{Method},
|
||||||
|
Measure: ServerRequestCount,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerResponseCountByStatusCode = &view.View{
|
||||||
|
Name: "opencensus.io/http/server/response_count_by_status_code",
|
||||||
|
Description: "Server response count by status code",
|
||||||
|
TagKeys: []tag.Key{StatusCode},
|
||||||
|
Measure: ServerLatency,
|
||||||
|
Aggregation: view.Count(),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultClientViews are the default client views provided by this package.
|
||||||
|
// Deprecated: No replacement. Register the views you would like individually.
|
||||||
|
var DefaultClientViews = []*view.View{
|
||||||
|
ClientRequestCountView,
|
||||||
|
ClientRequestBytesView,
|
||||||
|
ClientResponseBytesView,
|
||||||
|
ClientLatencyView,
|
||||||
|
ClientRequestCountByMethod,
|
||||||
|
ClientResponseCountByStatusCode,
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultServerViews are the default server views provided by this package.
|
||||||
|
// Deprecated: No replacement. Register the views you would like individually.
|
||||||
|
var DefaultServerViews = []*view.View{
|
||||||
|
ServerRequestCountView,
|
||||||
|
ServerRequestBytesView,
|
||||||
|
ServerResponseBytesView,
|
||||||
|
ServerLatencyView,
|
||||||
|
ServerRequestCountByMethod,
|
||||||
|
ServerResponseCountByStatusCode,
|
||||||
|
}
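None of the views above collect data until they are registered. A registration sketch, typically done once at startup before serving traffic (the chosen subset of views is illustrative):

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register only the views you actually need; every registered view is
	// aggregated and exported on each reporting interval.
	if err := view.Register(
		ochttp.ClientCompletedCount,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ServerLatencyView,
		ochttp.ServerResponseCountByStatusCode,
	); err != nil {
		log.Fatalf("failed to register ochttp views: %v", err)
	}
}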
|
|
@ -0,0 +1,241 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptrace"
|
||||||
|
|
||||||
|
"go.opencensus.io/plugin/ochttp/propagation/b3"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
"go.opencensus.io/trace/propagation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO(jbd): Add godoc examples.
|
||||||
|
|
||||||
|
var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
|
||||||
|
|
||||||
|
// Attributes recorded on the span for the requests.
|
||||||
|
// Only trace exporters will need them.
|
||||||
|
const (
|
||||||
|
HostAttribute = "http.host"
|
||||||
|
MethodAttribute = "http.method"
|
||||||
|
PathAttribute = "http.path"
|
||||||
|
URLAttribute = "http.url"
|
||||||
|
UserAgentAttribute = "http.user_agent"
|
||||||
|
StatusCodeAttribute = "http.status_code"
|
||||||
|
)
|
||||||
|
|
||||||
|
type traceTransport struct {
|
||||||
|
base http.RoundTripper
|
||||||
|
startOptions trace.StartOptions
|
||||||
|
format propagation.HTTPFormat
|
||||||
|
formatSpanName func(*http.Request) string
|
||||||
|
newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(jbd): Add message events for request and response size.
|
||||||
|
|
||||||
|
// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
|
||||||
|
// The created span can follow a parent span, if a parent is present in
|
||||||
|
// the request's context.
|
||||||
|
func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
name := t.formatSpanName(req)
|
||||||
|
// TODO(jbd): Discuss whether we want to prefix
|
||||||
|
// outgoing requests with Sent.
|
||||||
|
ctx, span := trace.StartSpan(req.Context(), name,
|
||||||
|
trace.WithSampler(t.startOptions.Sampler),
|
||||||
|
trace.WithSpanKind(trace.SpanKindClient))
|
||||||
|
|
||||||
|
if t.newClientTrace != nil {
|
||||||
|
req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
|
||||||
|
} else {
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.format != nil {
|
||||||
|
// SpanContextToRequest will modify its Request argument, which is
|
||||||
|
// contrary to the contract for http.RoundTripper, so we need to
|
||||||
|
// pass it a copy of the Request.
|
||||||
|
// However, the Request struct itself was already copied by
|
||||||
|
// the WithContext calls above and so we just need to copy the header.
|
||||||
|
header := make(http.Header)
|
||||||
|
for k, v := range req.Header {
|
||||||
|
header[k] = v
|
||||||
|
}
|
||||||
|
req.Header = header
|
||||||
|
t.format.SpanContextToRequest(span.SpanContext(), req)
|
||||||
|
}
|
||||||
|
|
||||||
|
span.AddAttributes(requestAttrs(req)...)
|
||||||
|
resp, err := t.base.RoundTrip(req)
|
||||||
|
if err != nil {
|
||||||
|
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||||
|
span.End()
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
span.AddAttributes(responseAttrs(resp)...)
|
||||||
|
span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
|
||||||
|
|
||||||
|
// span.End() will be invoked after
|
||||||
|
// a read from resp.Body returns io.EOF or when
|
||||||
|
// resp.Body.Close() is invoked.
|
||||||
|
bt := &bodyTracker{rc: resp.Body, span: span}
|
||||||
|
resp.Body = wrappedBody(bt, resp.Body)
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// bodyTracker wraps a response.Body and invokes
|
||||||
|
// span.End on encountering io.EOF while reading
|
||||||
|
// the body of the original response.
|
||||||
|
type bodyTracker struct {
|
||||||
|
rc io.ReadCloser
|
||||||
|
span *trace.Span
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ io.ReadCloser = (*bodyTracker)(nil)
|
||||||
|
|
||||||
|
func (bt *bodyTracker) Read(b []byte) (int, error) {
|
||||||
|
n, err := bt.rc.Read(b)
|
||||||
|
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
return n, nil
|
||||||
|
case io.EOF:
|
||||||
|
bt.span.End()
|
||||||
|
default:
|
||||||
|
// For all other errors, set the span status
|
||||||
|
bt.span.SetStatus(trace.Status{
|
||||||
|
// Code 2 is the error code for Internal server error.
|
||||||
|
Code: 2,
|
||||||
|
Message: err.Error(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *bodyTracker) Close() error {
|
||||||
|
// Invoking span.End on Close helps catch the cases
|
||||||
|
// in which a read returned a non-nil error: we set the
|
||||||
|
// span status but didn't end the span.
|
||||||
|
bt.span.End()
|
||||||
|
return bt.rc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelRequest cancels an in-flight request by closing its connection.
|
||||||
|
func (t *traceTransport) CancelRequest(req *http.Request) {
|
||||||
|
type canceler interface {
|
||||||
|
CancelRequest(*http.Request)
|
||||||
|
}
|
||||||
|
if cr, ok := t.base.(canceler); ok {
|
||||||
|
cr.CancelRequest(req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func spanNameFromURL(req *http.Request) string {
|
||||||
|
return req.URL.Path
|
||||||
|
}
|
||||||
|
|
||||||
|
func requestAttrs(r *http.Request) []trace.Attribute {
|
||||||
|
userAgent := r.UserAgent()
|
||||||
|
|
||||||
|
attrs := make([]trace.Attribute, 0, 5)
|
||||||
|
attrs = append(attrs,
|
||||||
|
trace.StringAttribute(PathAttribute, r.URL.Path),
|
||||||
|
trace.StringAttribute(URLAttribute, r.URL.String()),
|
||||||
|
trace.StringAttribute(HostAttribute, r.Host),
|
||||||
|
trace.StringAttribute(MethodAttribute, r.Method),
|
||||||
|
)
|
||||||
|
|
||||||
|
if userAgent != "" {
|
||||||
|
attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent))
|
||||||
|
}
|
||||||
|
|
||||||
|
return attrs
|
||||||
|
}
|
||||||
|
|
||||||
|
func responseAttrs(resp *http.Response) []trace.Attribute {
|
||||||
|
return []trace.Attribute{
|
||||||
|
trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
|
||||||
|
// represents the outcome as closely as possible.
|
||||||
|
func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
|
||||||
|
var code int32
|
||||||
|
if httpStatusCode < 200 || httpStatusCode >= 400 {
|
||||||
|
code = trace.StatusCodeUnknown
|
||||||
|
}
|
||||||
|
switch httpStatusCode {
|
||||||
|
case 499:
|
||||||
|
code = trace.StatusCodeCancelled
|
||||||
|
case http.StatusBadRequest:
|
||||||
|
code = trace.StatusCodeInvalidArgument
|
||||||
|
case http.StatusUnprocessableEntity:
|
||||||
|
code = trace.StatusCodeInvalidArgument
|
||||||
|
case http.StatusGatewayTimeout:
|
||||||
|
code = trace.StatusCodeDeadlineExceeded
|
||||||
|
case http.StatusNotFound:
|
||||||
|
code = trace.StatusCodeNotFound
|
||||||
|
case http.StatusForbidden:
|
||||||
|
code = trace.StatusCodePermissionDenied
|
||||||
|
case http.StatusUnauthorized: // 401 is actually unauthenticated.
|
||||||
|
code = trace.StatusCodeUnauthenticated
|
||||||
|
case http.StatusTooManyRequests:
|
||||||
|
code = trace.StatusCodeResourceExhausted
|
||||||
|
case http.StatusNotImplemented:
|
||||||
|
code = trace.StatusCodeUnimplemented
|
||||||
|
case http.StatusServiceUnavailable:
|
||||||
|
code = trace.StatusCodeUnavailable
|
||||||
|
case http.StatusOK:
|
||||||
|
code = trace.StatusCodeOK
|
||||||
|
}
|
||||||
|
return trace.Status{Code: code, Message: codeToStr[code]}
|
||||||
|
}
|
||||||
|
|
||||||
|
var codeToStr = map[int32]string{
|
||||||
|
trace.StatusCodeOK: `OK`,
|
||||||
|
trace.StatusCodeCancelled: `CANCELLED`,
|
||||||
|
trace.StatusCodeUnknown: `UNKNOWN`,
|
||||||
|
trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`,
|
||||||
|
trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`,
|
||||||
|
trace.StatusCodeNotFound: `NOT_FOUND`,
|
||||||
|
trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`,
|
||||||
|
trace.StatusCodePermissionDenied: `PERMISSION_DENIED`,
|
||||||
|
trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`,
|
||||||
|
trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
|
||||||
|
trace.StatusCodeAborted: `ABORTED`,
|
||||||
|
trace.StatusCodeOutOfRange: `OUT_OF_RANGE`,
|
||||||
|
trace.StatusCodeUnimplemented: `UNIMPLEMENTED`,
|
||||||
|
trace.StatusCodeInternal: `INTERNAL`,
|
||||||
|
trace.StatusCodeUnavailable: `UNAVAILABLE`,
|
||||||
|
trace.StatusCodeDataLoss: `DATA_LOSS`,
|
||||||
|
trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`,
|
||||||
|
}
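// For example, given the mapping above, TraceStatus(404, "404 Not Found")
// returns trace.Status{Code: trace.StatusCodeNotFound, Message: "NOT_FOUND"},
// while a code with no dedicated case, such as 502, falls through to
// trace.StatusCodeUnknown with message "UNKNOWN".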
|
||||||
|
|
||||||
|
func isHealthEndpoint(path string) bool {
|
||||||
|
// Health checking is pretty frequent and
|
||||||
|
// traces collected for health endpoints
|
||||||
|
// can be extremely noisy and expensive.
|
||||||
|
// Disable canonical health checking endpoints
|
||||||
|
// like /healthz and /_ah/health for now.
|
||||||
|
if path == "/healthz" || path == "/_ah/health" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
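A minimal client-side sketch; it assumes the exported ochttp.Transport round tripper (defined elsewhere in this package, not in this hunk), which drives the traceTransport shown above:

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	client := &http.Client{
		// The zero value wraps http.DefaultTransport and injects span
		// context headers using the default B3 format.
		Transport: &ochttp.Transport{},
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Closing the body (or reading it to io.EOF) ends the client span; see bodyTracker.
	resp.Body.Close()
}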
|
|
@ -0,0 +1,44 @@
|
||||||
|
// Copyright 2019, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ochttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// wrappedBody returns a wrapped version of the original
|
||||||
|
// Body and only implements the same combination of additional
|
||||||
|
// interfaces as the original.
|
||||||
|
func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
|
||||||
|
var (
|
||||||
|
wr, i0 = body.(io.Writer)
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case !i0:
|
||||||
|
return struct {
|
||||||
|
io.ReadCloser
|
||||||
|
}{wrapper}
|
||||||
|
|
||||||
|
case i0:
|
||||||
|
return struct {
|
||||||
|
io.ReadCloser
|
||||||
|
io.Writer
|
||||||
|
}{wrapper, wr}
|
||||||
|
default:
|
||||||
|
return struct {
|
||||||
|
io.ReadCloser
|
||||||
|
}{wrapper}
|
||||||
|
}
|
||||||
|
}
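A test-style sketch of the interface preservation, assuming it lives in the same package; writableBody and demoWrappedBody are hypothetical names:

package ochttp

import (
	"fmt"
	"io"
	"strings"
)

// writableBody is a body that also implements io.Writer.
type writableBody struct {
	io.ReadCloser
	io.Writer
}

func demoWrappedBody() {
	plain := io.NopCloser(strings.NewReader("plain"))
	rich := writableBody{io.NopCloser(strings.NewReader("rich")), io.Discard}

	// The wrapped plain body does not gain io.Writer...
	_, plainWritable := wrappedBody(plain, plain).(io.Writer)
	// ...while the wrapped writable body keeps it.
	_, richWritable := wrappedBody(rich, rich).(io.Writer)
	fmt.Println(plainWritable, richWritable) // false true
}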
|
|
@ -0,0 +1,164 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package resource provides functionality for resources, which capture
|
||||||
|
// identifying information about the entities for which signals are exported.
|
||||||
|
package resource
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Environment variables used by FromEnv to decode a resource.
|
||||||
|
const (
|
||||||
|
EnvVarType = "OC_RESOURCE_TYPE"
|
||||||
|
EnvVarLabels = "OC_RESOURCE_LABELS"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Resource describes an entity about which identifying information and metadata is exposed.
|
||||||
|
// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
|
||||||
|
type Resource struct {
|
||||||
|
Type string
|
||||||
|
Labels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
|
||||||
|
func EncodeLabels(labels map[string]string) string {
|
||||||
|
sortedKeys := make([]string, 0, len(labels))
|
||||||
|
for k := range labels {
|
||||||
|
sortedKeys = append(sortedKeys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(sortedKeys)
|
||||||
|
|
||||||
|
s := ""
|
||||||
|
for i, k := range sortedKeys {
|
||||||
|
if i > 0 {
|
||||||
|
s += ","
|
||||||
|
}
|
||||||
|
s += k + "=" + strconv.Quote(labels[k])
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
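// For example, EncodeLabels(map[string]string{"zone": "us-east-1", "app": "web"})
// returns `app="web",zone="us-east-1"`: keys are sorted and each value is
// quoted with strconv.Quote.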
|
||||||
|
|
||||||
|
var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
|
||||||
|
|
||||||
|
// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
|
||||||
|
// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
|
||||||
|
// Domain names and paths are accepted as label keys.
|
||||||
|
// Most users will want to use FromEnv instead.
|
||||||
|
func DecodeLabels(s string) (map[string]string, error) {
|
||||||
|
m := map[string]string{}
|
||||||
|
// Ensure a trailing comma, which allows us to keep the regex simpler
|
||||||
|
s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
|
||||||
|
|
||||||
|
for len(s) > 0 {
|
||||||
|
match := labelRegex.FindStringSubmatch(s)
|
||||||
|
if len(match) == 0 {
|
||||||
|
return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
|
||||||
|
}
|
||||||
|
v := match[2]
|
||||||
|
if v == "" {
|
||||||
|
v = match[3]
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
if v, err = strconv.Unquote(v); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m[match[1]] = v
|
||||||
|
|
||||||
|
s = s[len(match[0]):]
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
|
||||||
|
// and OC_RESOURCE_LABELS environment variables.
|
||||||
|
func FromEnv(context.Context) (*Resource, error) {
|
||||||
|
res := &Resource{
|
||||||
|
Type: strings.TrimSpace(os.Getenv(EnvVarType)),
|
||||||
|
}
|
||||||
|
labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
|
||||||
|
if labels == "" {
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if res.Labels, err = DecodeLabels(labels); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
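// For example, with OC_RESOURCE_TYPE set to "k8s.io/container" and
// OC_RESOURCE_LABELS set to `k8s.io/namespace="default"`, FromEnv returns a
// Resource with Type "k8s.io/container" and Labels
// map[string]string{"k8s.io/namespace": "default"}.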
|
||||||
|
|
||||||
|
var _ Detector = FromEnv
|
||||||
|
|
||||||
|
// merge resource information from b into a. In case of a collision, a takes precedence.
|
||||||
|
func merge(a, b *Resource) *Resource {
|
||||||
|
if a == nil {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
if b == nil {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
res := &Resource{
|
||||||
|
Type: a.Type,
|
||||||
|
Labels: map[string]string{},
|
||||||
|
}
|
||||||
|
if res.Type == "" {
|
||||||
|
res.Type = b.Type
|
||||||
|
}
|
||||||
|
for k, v := range b.Labels {
|
||||||
|
res.Labels[k] = v
|
||||||
|
}
|
||||||
|
// Labels from resource a overwrite labels from resource b.
|
||||||
|
for k, v := range a.Labels {
|
||||||
|
res.Labels[k] = v
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// Detector attempts to detect resource information.
|
||||||
|
// If the detector cannot find resource information, the returned resource is nil but no
|
||||||
|
// error is returned.
|
||||||
|
// An error is only returned on unexpected failures.
|
||||||
|
type Detector func(context.Context) (*Resource, error)
|
||||||
|
|
||||||
|
// MultiDetector returns a Detector that calls all input detectors in order and
|
||||||
|
// merges each result with the previous one. In case a type or label key is already set,
|
||||||
|
// the value that was set first takes precedence.
|
||||||
|
// It returns on the first error that a sub-detector encounters.
|
||||||
|
func MultiDetector(detectors ...Detector) Detector {
|
||||||
|
return func(ctx context.Context) (*Resource, error) {
|
||||||
|
return detectAll(ctx, detectors...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectAll calls all input detectors sequentially and merges each result with the previous one.
|
||||||
|
// It returns on the first error that a sub-detector encounters.
|
||||||
|
func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
|
||||||
|
var res *Resource
|
||||||
|
for _, d := range detectors {
|
||||||
|
r, err := d(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
res = merge(res, r)
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
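A minimal sketch of composing detectors with MultiDetector; staticDetector and its values are hypothetical:

package main

import (
	"context"
	"fmt"

	"go.opencensus.io/resource"
)

// staticDetector is a hypothetical application-defined detector.
func staticDetector(ctx context.Context) (*resource.Resource, error) {
	return &resource.Resource{
		Type:   "host",
		Labels: map[string]string{"host.name": "worker-1"},
	}, nil
}

func main() {
	// Detectors run in order; on collisions the value set first (FromEnv here) wins.
	detect := resource.MultiDetector(resource.FromEnv, staticDetector)
	res, err := detect(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Type, res.Labels)
}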
|
|
@ -0,0 +1,69 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package stats contains support for OpenCensus stats recording.
|
||||||
|
|
||||||
|
OpenCensus allows users to create typed measures, record measurements,
|
||||||
|
aggregate the collected data, and export the aggregated data.
|
||||||
|
|
||||||
|
Measures
|
||||||
|
|
||||||
|
A measure represents a type of data point to be tracked and recorded.
|
||||||
|
For example, latency, request Mb/s, and response Mb/s are measures
|
||||||
|
to collect from a server.
|
||||||
|
|
||||||
|
Measure constructors such as Int64 and Float64 automatically
|
||||||
|
register the measure by the given name. Each registered measure needs
|
||||||
|
to be unique by name. Measures also have a description and a unit.
|
||||||
|
|
||||||
|
Libraries can define and export measures. Application authors can then
|
||||||
|
create views and collect and break down measures by the tags they are
|
||||||
|
interested in.
|
||||||
|
|
||||||
|
Recording measurements
|
||||||
|
|
||||||
|
Measurement is a data point to be collected for a measure. For example,
|
||||||
|
for a latency (ms) measure, 100 is a measurement that represents a 100ms
|
||||||
|
latency event. Measurements are created from measures with
|
||||||
|
the current context. Tags from the current context are recorded with the
|
||||||
|
// measurements if there are any.
|
||||||
|
|
||||||
|
Recorded measurements are dropped immediately if no views are registered for them.
|
||||||
|
There is usually no need to conditionally enable and disable
|
||||||
|
recording to reduce cost. Recording of measurements is cheap.
|
||||||
|
|
||||||
|
Libraries can always record measurements, and applications can later decide
|
||||||
|
on which measurements they want to collect by registering views. This allows
|
||||||
|
libraries to turn on the instrumentation by default.
|
||||||
|
|
||||||
|
Exemplars
|
||||||
|
|
||||||
|
For a given recorded measurement, the associated exemplar is a diagnostic map
|
||||||
|
that gives more information about the measurement.
|
||||||
|
|
||||||
|
When aggregated using a Distribution aggregation, an exemplar is kept for each
|
||||||
|
bucket in the Distribution. This allows you to easily find an example of a
|
||||||
|
measurement that fell into each bucket.
|
||||||
|
|
||||||
|
For example, if you also use the OpenCensus trace package and you
|
||||||
|
record a measurement with a context that contains a sampled trace span,
|
||||||
|
then the trace span will be added to the exemplar associated with the measurement.
|
||||||
|
|
||||||
|
When exported to a supporting back end, you should be able to easily navigate
|
||||||
|
to example traces that fell into each bucket in the Distribution.
|
||||||
|
|
||||||
|
*/
|
||||||
|
package stats // import "go.opencensus.io/stats"
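A minimal sketch of the workflow the package comment describes; the measure name and value are illustrative:

package main

import (
	"context"

	"go.opencensus.io/stats"
)

// videoSize is registered by name when the constructor runs.
var videoSize = stats.Int64("example.com/measures/video_size", "size of processed videos", stats.UnitBytes)

func main() {
	// The measurement is dropped unless a view over videoSize is registered
	// (see the view package later in this diff).
	stats.Record(context.Background(), videoSize.M(25*1024*1024))
}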
|
|
@ -0,0 +1,25 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultRecorder will be called for each Record call.
|
||||||
|
var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
|
||||||
|
|
||||||
|
// SubscriptionReporter reports when a view is subscribed to a measure.
|
||||||
|
var SubscriptionReporter func(measure string)
|
|
@ -0,0 +1,109 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Measure represents a single numeric value to be tracked and recorded.
|
||||||
|
// For example, latency, request bytes, and response bytes could be measures
|
||||||
|
// to collect from a server.
|
||||||
|
//
|
||||||
|
// Measures by themselves have no outside effects. In order to be exported,
|
||||||
|
// the measure needs to be used in a View. If no Views are defined over a
|
||||||
|
// measure, there is very little cost in recording it.
|
||||||
|
type Measure interface {
|
||||||
|
// Name returns the name of this measure.
|
||||||
|
//
|
||||||
|
// Measure names are globally unique (among all libraries linked into your program).
|
||||||
|
// We recommend prefixing the measure name with a domain name relevant to your
|
||||||
|
// project or application.
|
||||||
|
//
|
||||||
|
// Measure names are never sent over the wire or exported to backends.
|
||||||
|
// They are only used to create Views.
|
||||||
|
Name() string
|
||||||
|
|
||||||
|
// Description returns the human-readable description of this measure.
|
||||||
|
Description() string
|
||||||
|
|
||||||
|
// Unit returns the units for the values this measure takes on.
|
||||||
|
//
|
||||||
|
// Units are encoded according to the case-sensitive abbreviations from the
|
||||||
|
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
|
||||||
|
Unit() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// measureDescriptor is the untyped descriptor associated with each measure.
|
||||||
|
// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
|
||||||
|
// recording APIs.
|
||||||
|
// Two Measures with the same name will have the same measureDescriptor.
|
||||||
|
type measureDescriptor struct {
|
||||||
|
subs int32 // access atomically
|
||||||
|
|
||||||
|
name string
|
||||||
|
description string
|
||||||
|
unit string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *measureDescriptor) subscribe() {
|
||||||
|
atomic.StoreInt32(&m.subs, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *measureDescriptor) subscribed() bool {
|
||||||
|
return atomic.LoadInt32(&m.subs) == 1
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
mu sync.RWMutex
|
||||||
|
measures = make(map[string]*measureDescriptor)
|
||||||
|
)
|
||||||
|
|
||||||
|
func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
|
||||||
|
if stored, ok := measures[name]; ok {
|
||||||
|
return stored
|
||||||
|
}
|
||||||
|
m := &measureDescriptor{
|
||||||
|
name: name,
|
||||||
|
description: desc,
|
||||||
|
unit: unit,
|
||||||
|
}
|
||||||
|
measures[name] = m
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Measurement is the numeric value measured when recording stats. Each measure
|
||||||
|
// provides methods to create measurements of their kind. For example, Int64Measure
|
||||||
|
// provides M to convert an int64 into a measurement.
|
||||||
|
type Measurement struct {
|
||||||
|
v float64
|
||||||
|
m Measure
|
||||||
|
desc *measureDescriptor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the value of the Measurement as a float64.
|
||||||
|
func (m Measurement) Value() float64 {
|
||||||
|
return m.v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Measure returns the Measure from which this Measurement was created.
|
||||||
|
func (m Measurement) Measure() Measure {
|
||||||
|
return m.m
|
||||||
|
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
// Float64Measure is a measure for float64 values.
|
||||||
|
type Float64Measure struct {
|
||||||
|
desc *measureDescriptor
|
||||||
|
}
|
||||||
|
|
||||||
|
// M creates a new float64 measurement.
|
||||||
|
// Use Record to record measurements.
|
||||||
|
func (m *Float64Measure) M(v float64) Measurement {
|
||||||
|
return Measurement{
|
||||||
|
m: m,
|
||||||
|
desc: m.desc,
|
||||||
|
v: v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 creates a new measure for float64 values.
|
||||||
|
//
|
||||||
|
// See the documentation for interface Measure for more guidance on the
|
||||||
|
// parameters of this function.
|
||||||
|
func Float64(name, description, unit string) *Float64Measure {
|
||||||
|
mi := registerMeasureHandle(name, description, unit)
|
||||||
|
return &Float64Measure{mi}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the name of the measure.
|
||||||
|
func (m *Float64Measure) Name() string {
|
||||||
|
return m.desc.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Description returns the description of the measure.
|
||||||
|
func (m *Float64Measure) Description() string {
|
||||||
|
return m.desc.description
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit returns the unit of the measure.
|
||||||
|
func (m *Float64Measure) Unit() string {
|
||||||
|
return m.desc.unit
|
||||||
|
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
// Int64Measure is a measure for int64 values.
|
||||||
|
type Int64Measure struct {
|
||||||
|
desc *measureDescriptor
|
||||||
|
}
|
||||||
|
|
||||||
|
// M creates a new int64 measurement.
|
||||||
|
// Use Record to record measurements.
|
||||||
|
func (m *Int64Measure) M(v int64) Measurement {
|
||||||
|
return Measurement{
|
||||||
|
m: m,
|
||||||
|
desc: m.desc,
|
||||||
|
v: float64(v),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64 creates a new measure for int64 values.
|
||||||
|
//
|
||||||
|
// See the documentation for interface Measure for more guidance on the
|
||||||
|
// parameters of this function.
|
||||||
|
func Int64(name, description, unit string) *Int64Measure {
|
||||||
|
mi := registerMeasureHandle(name, description, unit)
|
||||||
|
return &Int64Measure{mi}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the name of the measure.
|
||||||
|
func (m *Int64Measure) Name() string {
|
||||||
|
return m.desc.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Description returns the description of the measure.
|
||||||
|
func (m *Int64Measure) Description() string {
|
||||||
|
return m.desc.description
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit returns the unit of the measure.
|
||||||
|
func (m *Int64Measure) Unit() string {
|
||||||
|
return m.desc.unit
|
||||||
|
}
|
|
@ -0,0 +1,117 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"go.opencensus.io/metric/metricdata"
|
||||||
|
"go.opencensus.io/stats/internal"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
internal.SubscriptionReporter = func(measure string) {
|
||||||
|
mu.Lock()
|
||||||
|
measures[measure].subscribe()
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type recordOptions struct {
|
||||||
|
attachments metricdata.Attachments
|
||||||
|
mutators []tag.Mutator
|
||||||
|
measurements []Measurement
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAttachments applies provided exemplar attachments.
|
||||||
|
func WithAttachments(attachments metricdata.Attachments) Options {
|
||||||
|
return func(ro *recordOptions) {
|
||||||
|
ro.attachments = attachments
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTags applies provided tag mutators.
|
||||||
|
func WithTags(mutators ...tag.Mutator) Options {
|
||||||
|
return func(ro *recordOptions) {
|
||||||
|
ro.mutators = mutators
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMeasurements applies provided measurements.
|
||||||
|
func WithMeasurements(measurements ...Measurement) Options {
|
||||||
|
return func(ro *recordOptions) {
|
||||||
|
ro.measurements = measurements
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options apply changes to recordOptions.
|
||||||
|
type Options func(*recordOptions)
|
||||||
|
|
||||||
|
func createRecordOption(ros ...Options) *recordOptions {
|
||||||
|
o := &recordOptions{}
|
||||||
|
for _, ro := range ros {
|
||||||
|
ro(o)
|
||||||
|
}
|
||||||
|
return o
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record records one or multiple measurements with the same context at once.
|
||||||
|
// If there are any tags in the context, measurements will be tagged with them.
|
||||||
|
func Record(ctx context.Context, ms ...Measurement) {
|
||||||
|
RecordWithOptions(ctx, WithMeasurements(ms...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecordWithTags records one or multiple measurements at once.
|
||||||
|
//
|
||||||
|
// Measurements will be tagged with the tags in the context mutated by the mutators.
|
||||||
|
// RecordWithTags is useful if you want to record with tag mutations but don't want
|
||||||
|
// to propagate the mutations in the context.
|
||||||
|
func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error {
|
||||||
|
return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecordWithOptions records the measurements from the given options (if any),
|
||||||
|
// applying the tags and attachments in the options (if any) to the context.
|
||||||
|
// If there are any tags in the context, measurements will be tagged with them.
|
||||||
|
func RecordWithOptions(ctx context.Context, ros ...Options) error {
|
||||||
|
o := createRecordOption(ros...)
|
||||||
|
if len(o.measurements) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
recorder := internal.DefaultRecorder
|
||||||
|
if recorder == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
record := false
|
||||||
|
for _, m := range o.measurements {
|
||||||
|
if m.desc.subscribed() {
|
||||||
|
record = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !record {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(o.mutators) > 0 {
|
||||||
|
var err error
|
||||||
|
if ctx, err = tag.New(ctx, o.mutators...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
recorder(tag.FromContext(ctx), o.measurements, o.attachments)
|
||||||
|
return nil
|
||||||
|
}
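A minimal sketch of recording with per-call tag mutations; the key and measure names are illustrative:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var latencyMs = stats.Float64("example.com/measures/latency", "request latency", stats.UnitMilliseconds)

func main() {
	keyMethod, err := tag.NewKey("method")
	if err != nil {
		log.Fatal(err)
	}
	// The mutators tag this record call only; the caller's context is unchanged.
	if err := stats.RecordWithTags(context.Background(),
		[]tag.Mutator{tag.Upsert(keyMethod, "GET")},
		latencyMs.M(17.3),
	); err != nil {
		log.Fatal(err)
	}
}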
|
|
@ -0,0 +1,25 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
// Units are encoded according to the case-sensitive abbreviations from the
|
||||||
|
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
|
||||||
|
const (
|
||||||
|
UnitNone = "1" // Deprecated: Use UnitDimensionless.
|
||||||
|
UnitDimensionless = "1"
|
||||||
|
UnitBytes = "By"
|
||||||
|
UnitMilliseconds = "ms"
|
||||||
|
)
|
|
@ -0,0 +1,120 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package view
|
||||||
|
|
||||||
|
// AggType represents the type of aggregation function used on a View.
|
||||||
|
type AggType int
|
||||||
|
|
||||||
|
// All available aggregation types.
|
||||||
|
const (
|
||||||
|
AggTypeNone AggType = iota // no aggregation; reserved for future use.
|
||||||
|
AggTypeCount // the count aggregation, see Count.
|
||||||
|
AggTypeSum // the sum aggregation, see Sum.
|
||||||
|
AggTypeDistribution // the distribution aggregation, see Distribution.
|
||||||
|
AggTypeLastValue // the last value aggregation, see LastValue.
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t AggType) String() string {
|
||||||
|
return aggTypeName[t]
|
||||||
|
}
|
||||||
|
|
||||||
|
var aggTypeName = map[AggType]string{
|
||||||
|
AggTypeNone: "None",
|
||||||
|
AggTypeCount: "Count",
|
||||||
|
AggTypeSum: "Sum",
|
||||||
|
AggTypeDistribution: "Distribution",
|
||||||
|
AggTypeLastValue: "LastValue",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregation represents a data aggregation method. Use one of the functions:
|
||||||
|
// Count, Sum, or Distribution to construct an Aggregation.
|
||||||
|
type Aggregation struct {
|
||||||
|
Type AggType // Type is the AggType of this Aggregation.
|
||||||
|
Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
|
||||||
|
|
||||||
|
newData func() AggregationData
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
aggCount = &Aggregation{
|
||||||
|
Type: AggTypeCount,
|
||||||
|
newData: func() AggregationData {
|
||||||
|
return &CountData{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
aggSum = &Aggregation{
|
||||||
|
Type: AggTypeSum,
|
||||||
|
newData: func() AggregationData {
|
||||||
|
return &SumData{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Count indicates that data collected and aggregated
|
||||||
|
// with this method will be turned into a count value.
|
||||||
|
// For example, total number of accepted requests can be
|
||||||
|
// aggregated by using Count.
|
||||||
|
func Count() *Aggregation {
|
||||||
|
return aggCount
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum indicates that data collected and aggregated
|
||||||
|
// with this method will be summed up.
|
||||||
|
// For example, accumulated request bytes can be aggregated by using
|
||||||
|
// Sum.
|
||||||
|
func Sum() *Aggregation {
|
||||||
|
return aggSum
|
||||||
|
}
|
||||||
|
|
||||||
|
// Distribution indicates that the desired aggregation is
|
||||||
|
// a histogram distribution.
|
||||||
|
//
|
||||||
|
// A distribution aggregation may contain a histogram of the values in the
|
||||||
|
// population. The bucket boundaries for that histogram are described
|
||||||
|
// by the bounds. This defines len(bounds)+1 buckets.
|
||||||
|
//
|
||||||
|
// If len(bounds) >= 2 then the boundaries for bucket index i are:
|
||||||
|
//
|
||||||
|
// [-infinity, bounds[i]) for i = 0
|
||||||
|
// [bounds[i-1], bounds[i]) for 0 < i < length
|
||||||
|
// [bounds[i-1], +infinity) for i = length
|
||||||
|
//
|
||||||
|
// If len(bounds) is 0 then there is no histogram associated with the
|
||||||
|
// distribution. There will be a single bucket with boundaries
|
||||||
|
// (-infinity, +infinity).
|
||||||
|
//
|
||||||
|
// If len(bounds) is 1 then there are no finite buckets, and that single
|
||||||
|
// element is the common boundary of the overflow and underflow buckets.
|
||||||
|
func Distribution(bounds ...float64) *Aggregation {
|
||||||
|
return &Aggregation{
|
||||||
|
Type: AggTypeDistribution,
|
||||||
|
Buckets: bounds,
|
||||||
|
newData: func() AggregationData {
|
||||||
|
return newDistributionData(bounds)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
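// For example, Distribution(5, 10, 25) defines the four buckets
// [-infinity, 5), [5, 10), [10, 25) and [25, +infinity), while Distribution()
// with no bounds defines the single bucket (-infinity, +infinity).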
|
||||||
|
|
||||||
|
// LastValue only reports the last value recorded using this
|
||||||
|
// aggregation. All other measurements will be dropped.
|
||||||
|
func LastValue() *Aggregation {
|
||||||
|
return &Aggregation{
|
||||||
|
Type: AggTypeLastValue,
|
||||||
|
newData: func() AggregationData {
|
||||||
|
return &LastValueData{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,293 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package view
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AggregationData represents an aggregated value from a collection.
|
||||||
|
// They are reported on the view data during exporting.
|
||||||
|
// Most users won't directly access aggregation data.
|
||||||
|
type AggregationData interface {
|
||||||
|
isAggregationData() bool
|
||||||
|
addSample(v float64, attachments map[string]interface{}, t time.Time)
|
||||||
|
clone() AggregationData
|
||||||
|
equal(other AggregationData) bool
|
||||||
|
toPoint(t metricdata.Type, time time.Time) metricdata.Point
|
||||||
|
}
|
||||||
|
|
||||||
|
const epsilon = 1e-9
|
||||||
|
|
||||||
|
// CountData is the aggregated data for the Count aggregation.
|
||||||
|
// A count aggregation processes data and counts the recordings.
|
||||||
|
//
|
||||||
|
// Most users won't directly access count data.
|
||||||
|
type CountData struct {
|
||||||
|
Value int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CountData) isAggregationData() bool { return true }
|
||||||
|
|
||||||
|
func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) {
|
||||||
|
a.Value = a.Value + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CountData) clone() AggregationData {
|
||||||
|
return &CountData{Value: a.Value}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CountData) equal(other AggregationData) bool {
|
||||||
|
a2, ok := other.(*CountData)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return a.Value == a2.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
|
||||||
|
switch metricType {
|
||||||
|
case metricdata.TypeCumulativeInt64:
|
||||||
|
return metricdata.NewInt64Point(t, a.Value)
|
||||||
|
default:
|
||||||
|
panic("unsupported metricdata.Type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumData is the aggregated data for the Sum aggregation.
|
||||||
|
// A sum aggregation processes data and sums up the recordings.
|
||||||
|
//
|
||||||
|
// Most users won't directly access sum data.
|
||||||
|
type SumData struct {
|
||||||
|
Value float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *SumData) isAggregationData() bool { return true }
|
||||||
|
|
||||||
|
func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
|
||||||
|
a.Value += v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *SumData) clone() AggregationData {
|
||||||
|
return &SumData{Value: a.Value}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *SumData) equal(other AggregationData) bool {
|
||||||
|
a2, ok := other.(*SumData)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return math.Pow(a.Value-a2.Value, 2) < epsilon
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
|
||||||
|
switch metricType {
|
||||||
|
case metricdata.TypeCumulativeInt64:
|
||||||
|
return metricdata.NewInt64Point(t, int64(a.Value))
|
||||||
|
case metricdata.TypeCumulativeFloat64:
|
||||||
|
return metricdata.NewFloat64Point(t, a.Value)
|
||||||
|
default:
|
||||||
|
panic("unsupported metricdata.Type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DistributionData is the aggregated data for the
|
||||||
|
// Distribution aggregation.
|
||||||
|
//
|
||||||
|
// Most users won't directly access distribution data.
|
||||||
|
//
|
||||||
|
// For a distribution with N bounds, the associated DistributionData will have
|
||||||
|
// N+1 buckets.
|
||||||
|
type DistributionData struct {
|
||||||
|
Count int64 // number of data points aggregated
|
||||||
|
Min float64 // minimum value in the distribution
|
||||||
|
Max float64 // max value in the distribution
|
||||||
|
Mean float64 // mean of the distribution
|
||||||
|
SumOfSquaredDev float64 // sum of the squared deviation from the mean
|
||||||
|
CountPerBucket []int64 // number of occurrences per bucket
|
||||||
|
// ExemplarsPerBucket is a slice of the same length as CountPerBucket containing
|
||||||
|
// an exemplar for the associated bucket, or nil.
|
||||||
|
ExemplarsPerBucket []*metricdata.Exemplar
|
||||||
|
bounds []float64 // histogram distribution of the values
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDistributionData(bounds []float64) *DistributionData {
|
||||||
|
bucketCount := len(bounds) + 1
|
||||||
|
return &DistributionData{
|
||||||
|
CountPerBucket: make([]int64, bucketCount),
|
||||||
|
ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
|
||||||
|
bounds: bounds,
|
||||||
|
Min: math.MaxFloat64,
|
||||||
|
Max: math.SmallestNonzeroFloat64,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns the sum of all samples collected.
|
||||||
|
func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) }
|
||||||
|
|
||||||
|
func (a *DistributionData) variance() float64 {
|
||||||
|
if a.Count <= 1 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return a.SumOfSquaredDev / float64(a.Count-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *DistributionData) isAggregationData() bool { return true }
|
||||||
|
|
||||||
|
// TODO(songy23): support exemplar attachments.
|
||||||
|
func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) {
|
||||||
|
if v < a.Min {
|
||||||
|
a.Min = v
|
||||||
|
}
|
||||||
|
if v > a.Max {
|
||||||
|
a.Max = v
|
||||||
|
}
|
||||||
|
a.Count++
|
||||||
|
a.addToBucket(v, attachments, t)
|
||||||
|
|
||||||
|
if a.Count == 1 {
|
||||||
|
a.Mean = v
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
oldMean := a.Mean
|
||||||
|
a.Mean = a.Mean + (v-a.Mean)/float64(a.Count)
|
||||||
|
a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean)
|
||||||
|
}
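// As a worked example of the incremental update above (Welford's method):
// recording 2, 4 and 9 in turn gives Mean = 2, 3, 5 and
// SumOfSquaredDev = 0, 2, 26, so variance() returns 26/(3-1) = 13, which
// matches the sample variance of {2, 4, 9} computed directly.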
|
||||||
|
|
||||||
|
func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) {
|
||||||
|
var count *int64
|
||||||
|
var i int
|
||||||
|
var b float64
|
||||||
|
for i, b = range a.bounds {
|
||||||
|
if v < b {
|
||||||
|
count = &a.CountPerBucket[i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count == nil { // Last bucket.
|
||||||
|
i = len(a.bounds)
|
||||||
|
count = &a.CountPerBucket[i]
|
||||||
|
}
|
||||||
|
*count++
|
||||||
|
if exemplar := getExemplar(v, attachments, t); exemplar != nil {
|
||||||
|
a.ExemplarsPerBucket[i] = exemplar
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar {
|
||||||
|
if len(attachments) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &metricdata.Exemplar{
|
||||||
|
Value: v,
|
||||||
|
Timestamp: t,
|
||||||
|
Attachments: attachments,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *DistributionData) clone() AggregationData {
|
||||||
|
c := *a
|
||||||
|
c.CountPerBucket = append([]int64(nil), a.CountPerBucket...)
|
||||||
|
c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
|
||||||
|
return &c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *DistributionData) equal(other AggregationData) bool {
|
||||||
|
a2, ok := other.(*DistributionData)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if a2 == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(a.CountPerBucket) != len(a2.CountPerBucket) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := range a.CountPerBucket {
|
||||||
|
if a.CountPerBucket[i] != a2.CountPerBucket[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
|
||||||
|
switch metricType {
|
||||||
|
case metricdata.TypeCumulativeDistribution:
|
||||||
|
buckets := []metricdata.Bucket{}
|
||||||
|
for i := 0; i < len(a.CountPerBucket); i++ {
|
||||||
|
buckets = append(buckets, metricdata.Bucket{
|
||||||
|
Count: a.CountPerBucket[i],
|
||||||
|
Exemplar: a.ExemplarsPerBucket[i],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds}
|
||||||
|
|
||||||
|
val := &metricdata.Distribution{
|
||||||
|
Count: a.Count,
|
||||||
|
Sum: a.Sum(),
|
||||||
|
SumOfSquaredDeviation: a.SumOfSquaredDev,
|
||||||
|
BucketOptions: bucketOptions,
|
||||||
|
Buckets: buckets,
|
||||||
|
}
|
||||||
|
return metricdata.NewDistributionPoint(t, val)
|
||||||
|
|
||||||
|
default:
|
||||||
|
// TODO: [rghetia] when we have a use case for TypeGaugeDistribution.
|
||||||
|
panic("unsupported metricdata.Type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastValueData returns the last value recorded for LastValue aggregation.
|
||||||
|
type LastValueData struct {
|
||||||
|
Value float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) isAggregationData() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
|
||||||
|
l.Value = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) clone() AggregationData {
|
||||||
|
return &LastValueData{l.Value}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) equal(other AggregationData) bool {
|
||||||
|
a2, ok := other.(*LastValueData)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return l.Value == a2.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
|
||||||
|
switch metricType {
|
||||||
|
case metricdata.TypeGaugeInt64:
|
||||||
|
return metricdata.NewInt64Point(t, int64(l.Value))
|
||||||
|
case metricdata.TypeGaugeFloat64:
|
||||||
|
return metricdata.NewFloat64Point(t, l.Value)
|
||||||
|
default:
|
||||||
|
panic("unsupported metricdata.Type")
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,86 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package view
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opencensus.io/internal/tagencoding"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
type collector struct {
|
||||||
|
// signatures maps each unique tag signature
|
||||||
|
// (values for all keys) to its aggregator.
|
||||||
|
signatures map[string]AggregationData
|
||||||
|
// Aggregation is the description of the aggregation to perform for this
|
||||||
|
// view.
|
||||||
|
a *Aggregation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
|
||||||
|
aggregator, ok := c.signatures[s]
|
||||||
|
if !ok {
|
||||||
|
aggregator = c.a.newData()
|
||||||
|
c.signatures[s] = aggregator
|
||||||
|
}
|
||||||
|
aggregator.addSample(v, attachments, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectedRows returns a snapshot of the collected Row values.
|
||||||
|
func (c *collector) collectedRows(keys []tag.Key) []*Row {
|
||||||
|
rows := make([]*Row, 0, len(c.signatures))
|
||||||
|
for sig, aggregator := range c.signatures {
|
||||||
|
tags := decodeTags([]byte(sig), keys)
|
||||||
|
row := &Row{Tags: tags, Data: aggregator.clone()}
|
||||||
|
rows = append(rows, row)
|
||||||
|
}
|
||||||
|
return rows
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *collector) clearRows() {
|
||||||
|
c.signatures = make(map[string]AggregationData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeWithKeys encodes the map using only the values
|
||||||
|
// associated with the keys provided.
|
||||||
|
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
|
||||||
|
vb := &tagencoding.Values{
|
||||||
|
Buffer: make([]byte, len(keys)),
|
||||||
|
}
|
||||||
|
for _, k := range keys {
|
||||||
|
v, _ := m.Value(k)
|
||||||
|
vb.WriteValue([]byte(v))
|
||||||
|
}
|
||||||
|
return vb.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeTags decodes tags from the buffer and
|
||||||
|
// orders them by the keys.
|
||||||
|
func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
|
||||||
|
vb := &tagencoding.Values{Buffer: buf}
|
||||||
|
var tags []tag.Tag
|
||||||
|
for _, k := range keys {
|
||||||
|
v := vb.ReadValue()
|
||||||
|
if v != nil {
|
||||||
|
tags = append(tags, tag.Tag{Key: k, Value: string(v)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
vb.ReadIndex = 0
|
||||||
|
sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
|
||||||
|
return tags
|
||||||
|
}
|
|
@ -0,0 +1,47 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Package view contains support for collecting and exposing aggregates over stats.
|
||||||
|
//
|
||||||
|
// In order to collect measurements, views need to be defined and registered.
|
||||||
|
// A view allows recorded measurements to be filtered and aggregated.
|
||||||
|
//
|
||||||
|
// All recorded measurements can be grouped by a list of tags.
|
||||||
|
//
|
||||||
|
// OpenCensus provides several aggregation methods: Count, Distribution and Sum.
|
||||||
|
//
|
||||||
|
// Count only counts the number of measurement points recorded.
|
||||||
|
// Distribution provides a statistical summary of the aggregated data by counting
|
||||||
|
// how many recorded measurements fall into each bucket.
|
||||||
|
// Sum adds up the measurement values.
|
||||||
|
// LastValue just keeps track of the most recently recorded measurement value.
|
||||||
|
// All aggregations are cumulative.
|
||||||
|
//
|
||||||
|
// Views can be registered and unregistered at any time during program execution.
|
||||||
|
//
|
||||||
|
// Libraries can define views but it is recommended that in most cases registering
|
||||||
|
// views be left up to applications.
|
||||||
|
//
|
||||||
|
// Exporting
|
||||||
|
//
|
||||||
|
// Collected and aggregated data can be exported to a metric collection
|
||||||
|
// backend by registering its exporter.
|
||||||
|
//
|
||||||
|
// Multiple exporters can be registered to upload the data to various
|
||||||
|
// different back ends.
|
||||||
|
package view // import "go.opencensus.io/stats/view"
|
||||||
|
|
||||||
|
// TODO(acetechnologist): Add a link to the language independent OpenCensus
|
||||||
|
// spec when it is available.
|
|
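A minimal usage sketch of the registration flow the package comment describes; it is not part of the vendored file and assumes the stats.Int64 measure constructor and the Count aggregation constructor defined elsewhere in these packages, with made-up names:

package main

import (
	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// openConns is a hypothetical measure used only for illustration.
var openConns = stats.Int64("example.com/measures/open_conns", "open connections", "1")

func main() {
	v := &view.View{
		Name:        "example.com/views/open_conns_count",
		Description: "count of open-connection events",
		Measure:     openConns,
		Aggregation: view.Count(), // Count, Sum, Distribution or LastValue
	}
	// Register starts collection; aggregated rows are delivered to any
	// registered exporters on the reporting period.
	if err := view.Register(v); err != nil {
		panic(err)
	}
}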
@ -0,0 +1,58 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package view

import "sync"

var (
	exportersMu sync.RWMutex // guards exporters
	exporters   = make(map[Exporter]struct{})
)

// Exporter exports the collected records as view data.
//
// The ExportView method should return quickly; if an
// Exporter takes a significant amount of time to
// process a Data, that work should be done on another goroutine.
//
// It is safe to assume that ExportView will not be called concurrently from
// multiple goroutines.
//
// The Data should not be modified.
type Exporter interface {
	ExportView(viewData *Data)
}

// RegisterExporter registers an exporter.
// Collected data will be reported via all the
// registered exporters. Once you no longer
// want data to be exported, invoke UnregisterExporter
// with the previously registered exporter.
//
// Binaries can register exporters, libraries shouldn't register exporters.
func RegisterExporter(e Exporter) {
	exportersMu.Lock()
	defer exportersMu.Unlock()

	exporters[e] = struct{}{}
}

// UnregisterExporter unregisters an exporter.
func UnregisterExporter(e Exporter) {
	exportersMu.Lock()
	defer exportersMu.Unlock()

	delete(exporters, e)
}
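For illustration, a minimal Exporter implementation against the interface above; the print-to-log behaviour and type name are made up, and heavy processing would belong on a separate goroutine as the interface comment requires:

package main

import (
	"log"

	"go.opencensus.io/stats/view"
)

// printExporter is a hypothetical exporter that logs every exported batch.
type printExporter struct{}

// ExportView returns quickly; it only formats and logs the collected rows.
func (printExporter) ExportView(vd *view.Data) {
	for _, row := range vd.Rows {
		log.Printf("%s %v = %v", vd.View.Name, row.Tags, row.Data)
	}
}

func main() {
	view.RegisterExporter(printExporter{})
	// ... register views and record measurements; UnregisterExporter stops delivery.
}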
@ -0,0 +1,221 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package view

import (
	"bytes"
	"errors"
	"fmt"
	"reflect"
	"sort"
	"sync/atomic"
	"time"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

// View allows users to aggregate the recorded stats.Measurements.
// Views need to be passed to the Register function before data will be
// collected and sent to Exporters.
type View struct {
	Name        string // Name of View. Must be unique. If unset, will default to the name of the Measure.
	Description string // Description is a human-readable description for this view.

	// TagKeys are the tag keys describing the grouping of this view.
	// A single Row will be produced for each combination of associated tag values.
	TagKeys []tag.Key

	// Measure is a stats.Measure to aggregate in this view.
	Measure stats.Measure

	// Aggregation is the aggregation function to apply to the set of Measurements.
	Aggregation *Aggregation
}

// WithName returns a copy of the View with a new name. This is useful for
// renaming views to cope with limitations placed on metric names by various
// backends.
func (v *View) WithName(name string) *View {
	vNew := *v
	vNew.Name = name
	return &vNew
}

// same compares two views and returns true if they represent the same aggregation.
func (v *View) same(other *View) bool {
	if v == other {
		return true
	}
	if v == nil {
		return false
	}
	return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
		v.Measure.Name() == other.Measure.Name()
}

// ErrNegativeBucketBounds is the error returned if a histogram contains negative bounds.
//
// Deprecated: this should not be public.
var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported")

// canonicalize canonicalizes v by setting explicit
// defaults for Name and Description and sorting the TagKeys.
func (v *View) canonicalize() error {
	if v.Measure == nil {
		return fmt.Errorf("cannot register view %q: measure not set", v.Name)
	}
	if v.Aggregation == nil {
		return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
	}
	if v.Name == "" {
		v.Name = v.Measure.Name()
	}
	if v.Description == "" {
		v.Description = v.Measure.Description()
	}
	if err := checkViewName(v.Name); err != nil {
		return err
	}
	sort.Slice(v.TagKeys, func(i, j int) bool {
		return v.TagKeys[i].Name() < v.TagKeys[j].Name()
	})
	sort.Float64s(v.Aggregation.Buckets)
	for _, b := range v.Aggregation.Buckets {
		if b < 0 {
			return ErrNegativeBucketBounds
		}
	}
	// drop 0 bucket silently.
	v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...)

	return nil
}

func dropZeroBounds(bounds ...float64) []float64 {
	for i, bound := range bounds {
		if bound > 0 {
			return bounds[i:]
		}
	}
	return []float64{}
}

// viewInternal is the internal representation of a View.
type viewInternal struct {
	view             *View  // view is the canonicalized View definition associated with this view.
	subscribed       uint32 // 1 if someone is subscribed and data needs to be exported; use atomic to access.
	collector        *collector
	metricDescriptor *metricdata.Descriptor
}

func newViewInternal(v *View) (*viewInternal, error) {
	return &viewInternal{
		view:             v,
		collector:        &collector{make(map[string]AggregationData), v.Aggregation},
		metricDescriptor: viewToMetricDescriptor(v),
	}, nil
}

func (v *viewInternal) subscribe() {
	atomic.StoreUint32(&v.subscribed, 1)
}

func (v *viewInternal) unsubscribe() {
	atomic.StoreUint32(&v.subscribed, 0)
}

// isSubscribed returns true if the view is exporting
// data by subscription.
func (v *viewInternal) isSubscribed() bool {
	return atomic.LoadUint32(&v.subscribed) == 1
}

func (v *viewInternal) clearRows() {
	v.collector.clearRows()
}

func (v *viewInternal) collectedRows() []*Row {
	return v.collector.collectedRows(v.view.TagKeys)
}

func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) {
	if !v.isSubscribed() {
		return
	}
	sig := string(encodeWithKeys(m, v.view.TagKeys))
	v.collector.addSample(sig, val, attachments, t)
}

// A Data is a set of rows about usage of the single measure associated
// with the given view. Each row is specific to a unique set of tags.
type Data struct {
	View       *View
	Start, End time.Time
	Rows       []*Row
}

// Row is the collected value for a specific set of key/value pairs, a.k.a. tags.
type Row struct {
	Tags []tag.Tag
	Data AggregationData
}

func (r *Row) String() string {
	var buffer bytes.Buffer
	buffer.WriteString("{ ")
	buffer.WriteString("{ ")
	for _, t := range r.Tags {
		buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value))
	}
	buffer.WriteString(" }")
	buffer.WriteString(fmt.Sprintf("%v", r.Data))
	buffer.WriteString(" }")
	return buffer.String()
}

// Equal returns true if both rows are equal. Tags are expected to be ordered
// by the key name. Even if both rows have the same tags but the tags appear in
// different orders it will return false.
func (r *Row) Equal(other *Row) bool {
	if r == other {
		return true
	}
	return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
}

const maxNameLength = 255

// Returns true if the given string contains only printable characters.
func isPrintable(str string) bool {
	for _, r := range str {
		if !(r >= ' ' && r <= '~') {
			return false
		}
	}
	return true
}

func checkViewName(name string) error {
	if len(name) > maxNameLength {
		return fmt.Errorf("view name cannot be larger than %v", maxNameLength)
	}
	if !isPrintable(name) {
		return fmt.Errorf("view name needs to be an ASCII string")
	}
	return nil
}
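A sketch of how canonicalize above treats Distribution buckets during registration: bounds are sorted, negative bounds are rejected with ErrNegativeBucketBounds, and zero bounds are silently dropped. It assumes the stats.Float64 and Distribution constructors defined elsewhere in these packages; names are illustrative:

package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	latency := stats.Float64("example.com/measures/latency", "request latency", "ms")
	v := &view.View{
		// Name and Description are left empty; canonicalize defaults them
		// to the Measure's name and description.
		Measure:     latency,
		TagKeys:     []tag.Key{tag.MustNewKey("method")},
		Aggregation: view.Distribution(0, 25, 100, 400), // the leading 0 bound is dropped
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
}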
@ -0,0 +1,140 @@
// Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package view

import (
	"time"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/stats"
)

func getUnit(unit string) metricdata.Unit {
	switch unit {
	case "1":
		return metricdata.UnitDimensionless
	case "ms":
		return metricdata.UnitMilliseconds
	case "By":
		return metricdata.UnitBytes
	}
	return metricdata.UnitDimensionless
}

func getType(v *View) metricdata.Type {
	m := v.Measure
	agg := v.Aggregation

	switch agg.Type {
	case AggTypeSum:
		switch m.(type) {
		case *stats.Int64Measure:
			return metricdata.TypeCumulativeInt64
		case *stats.Float64Measure:
			return metricdata.TypeCumulativeFloat64
		default:
			panic("unexpected measure type")
		}
	case AggTypeDistribution:
		return metricdata.TypeCumulativeDistribution
	case AggTypeLastValue:
		switch m.(type) {
		case *stats.Int64Measure:
			return metricdata.TypeGaugeInt64
		case *stats.Float64Measure:
			return metricdata.TypeGaugeFloat64
		default:
			panic("unexpected measure type")
		}
	case AggTypeCount:
		switch m.(type) {
		case *stats.Int64Measure:
			return metricdata.TypeCumulativeInt64
		case *stats.Float64Measure:
			return metricdata.TypeCumulativeInt64
		default:
			panic("unexpected measure type")
		}
	default:
		panic("unexpected aggregation type")
	}
}

func getLabelKeys(v *View) []metricdata.LabelKey {
	labelKeys := []metricdata.LabelKey{}
	for _, k := range v.TagKeys {
		labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()})
	}
	return labelKeys
}

func viewToMetricDescriptor(v *View) *metricdata.Descriptor {
	return &metricdata.Descriptor{
		Name:        v.Name,
		Description: v.Description,
		Unit:        getUnit(v.Measure.Unit()),
		Type:        getType(v),
		LabelKeys:   getLabelKeys(v),
	}
}

func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue {
	labelValues := []metricdata.LabelValue{}
	tagMap := make(map[string]string)
	for _, tag := range row.Tags {
		tagMap[tag.Key.Name()] = tag.Value
	}

	for _, key := range expectedKeys {
		if val, ok := tagMap[key.Key]; ok {
			labelValues = append(labelValues, metricdata.NewLabelValue(val))
		} else {
			labelValues = append(labelValues, metricdata.LabelValue{})
		}
	}
	return labelValues
}

func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries {
	return &metricdata.TimeSeries{
		Points:      []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)},
		LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys),
		StartTime:   startTime,
	}
}

func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric {
	if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
		v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
		startTime = time.Time{}
	}

	rows := v.collectedRows()
	if len(rows) == 0 {
		return nil
	}

	ts := []*metricdata.TimeSeries{}
	for _, row := range rows {
		ts = append(ts, rowToTimeseries(v, row, now, startTime))
	}

	m := &metricdata.Metric{
		Descriptor: *v.metricDescriptor,
		TimeSeries: ts,
	}
	return m
}
@ -0,0 +1,281 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package view

import (
	"fmt"
	"sync"
	"time"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
	"go.opencensus.io/stats"
	"go.opencensus.io/stats/internal"
	"go.opencensus.io/tag"
)

func init() {
	defaultWorker = newWorker()
	go defaultWorker.start()
	internal.DefaultRecorder = record
}

type measureRef struct {
	measure string
	views   map[*viewInternal]struct{}
}

type worker struct {
	measures   map[string]*measureRef
	views      map[string]*viewInternal
	startTimes map[*viewInternal]time.Time

	timer      *time.Ticker
	c          chan command
	quit, done chan bool
	mu         sync.RWMutex
}

var defaultWorker *worker

var defaultReportingDuration = 10 * time.Second

// Find returns a registered view associated with this name.
// If no registered view is found, nil is returned.
func Find(name string) (v *View) {
	req := &getViewByNameReq{
		name: name,
		c:    make(chan *getViewByNameResp),
	}
	defaultWorker.c <- req
	resp := <-req.c
	return resp.v
}

// Register begins collecting data for the given views.
// Once a view is registered, it reports data to the registered exporters.
func Register(views ...*View) error {
	req := &registerViewReq{
		views: views,
		err:   make(chan error),
	}
	defaultWorker.c <- req
	return <-req.err
}

// Unregister the given views. Data will no longer be exported for these views
// after Unregister returns.
// It is not necessary to unregister from views you expect to collect for the
// duration of your program execution.
func Unregister(views ...*View) {
	names := make([]string, len(views))
	for i := range views {
		names[i] = views[i].Name
	}
	req := &unregisterFromViewReq{
		views: names,
		done:  make(chan struct{}),
	}
	defaultWorker.c <- req
	<-req.done
}

// RetrieveData gets a snapshot of the data collected for the view registered
// with the given name. It is intended for testing only.
func RetrieveData(viewName string) ([]*Row, error) {
	req := &retrieveDataReq{
		now: time.Now(),
		v:   viewName,
		c:   make(chan *retrieveDataResp),
	}
	defaultWorker.c <- req
	resp := <-req.c
	return resp.rows, resp.err
}

func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
	req := &recordReq{
		tm:          tags,
		ms:          ms.([]stats.Measurement),
		attachments: attachments,
		t:           time.Now(),
	}
	defaultWorker.c <- req
}

// SetReportingPeriod sets the interval between reporting aggregated views in
// the program. If duration is less than or equal to zero, it enables the
// default behavior.
//
// Note: each exporter makes different promises about what the lowest supported
// duration is. For example, the Stackdriver exporter recommends a value no
// lower than 1 minute. Consult each exporter per your needs.
func SetReportingPeriod(d time.Duration) {
	// TODO(acetechnologist): ensure that the duration d is more than a certain
	// value. e.g. 1s
	req := &setReportingPeriodReq{
		d: d,
		c: make(chan bool),
	}
	defaultWorker.c <- req
	<-req.c // don't return until the timer is set to the new duration.
}

func newWorker() *worker {
	return &worker{
		measures:   make(map[string]*measureRef),
		views:      make(map[string]*viewInternal),
		startTimes: make(map[*viewInternal]time.Time),
		timer:      time.NewTicker(defaultReportingDuration),
		c:          make(chan command, 1024),
		quit:       make(chan bool),
		done:       make(chan bool),
	}
}

func (w *worker) start() {
	prodMgr := metricproducer.GlobalManager()
	prodMgr.AddProducer(w)

	for {
		select {
		case cmd := <-w.c:
			cmd.handleCommand(w)
		case <-w.timer.C:
			w.reportUsage(time.Now())
		case <-w.quit:
			w.timer.Stop()
			close(w.c)
			w.done <- true
			return
		}
	}
}

func (w *worker) stop() {
	prodMgr := metricproducer.GlobalManager()
	prodMgr.DeleteProducer(w)

	w.quit <- true
	<-w.done
}

func (w *worker) getMeasureRef(name string) *measureRef {
	if mr, ok := w.measures[name]; ok {
		return mr
	}
	mr := &measureRef{
		measure: name,
		views:   make(map[*viewInternal]struct{}),
	}
	w.measures[name] = mr
	return mr
}

func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	vi, err := newViewInternal(v)
	if err != nil {
		return nil, err
	}
	if x, ok := w.views[vi.view.Name]; ok {
		if !x.view.same(vi.view) {
			return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
		}

		// the view is already registered so there is nothing to do and the
		// command is considered successful.
		return x, nil
	}
	w.views[vi.view.Name] = vi
	ref := w.getMeasureRef(vi.view.Measure.Name())
	ref.views[vi] = struct{}{}
	return vi, nil
}

func (w *worker) unregisterView(viewName string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	delete(w.views, viewName)
}

func (w *worker) reportView(v *viewInternal, now time.Time) {
	if !v.isSubscribed() {
		return
	}
	rows := v.collectedRows()
	_, ok := w.startTimes[v]
	if !ok {
		w.startTimes[v] = now
	}
	viewData := &Data{
		View:  v.view,
		Start: w.startTimes[v],
		End:   time.Now(),
		Rows:  rows,
	}
	exportersMu.Lock()
	for e := range exporters {
		e.ExportView(viewData)
	}
	exportersMu.Unlock()
}

func (w *worker) reportUsage(now time.Time) {
	w.mu.Lock()
	defer w.mu.Unlock()
	for _, v := range w.views {
		w.reportView(v, now)
	}
}

func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric {
	if !v.isSubscribed() {
		return nil
	}

	_, ok := w.startTimes[v]
	if !ok {
		w.startTimes[v] = now
	}

	var startTime time.Time
	if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
		v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
		startTime = time.Time{}
	} else {
		startTime = w.startTimes[v]
	}

	return viewToMetric(v, now, startTime)
}

// Read reads all view data and returns them as metrics.
// It is typically invoked by the metric reader to export stats in metric format.
func (w *worker) Read() []*metricdata.Metric {
	w.mu.Lock()
	defer w.mu.Unlock()
	now := time.Now()
	metrics := make([]*metricdata.Metric, 0, len(w.views))
	for _, v := range w.views {
		metric := w.toMetric(v, now)
		if metric != nil {
			metrics = append(metrics, metric)
		}
	}
	return metrics
}
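A short sketch tying the exported worker API above together; the one-second period and the test-style call to RetrieveData are illustrative only, and the view name is the hypothetical one from the earlier sketch:

package main

import (
	"log"
	"time"

	"go.opencensus.io/stats/view"
)

func main() {
	// Lower the export interval from the 10s default; individual exporters
	// may still recommend a higher minimum.
	view.SetReportingPeriod(1 * time.Second)

	// ... view.Register(...) and measurement recording happen elsewhere ...

	// RetrieveData is intended for tests: it snapshots the rows collected so
	// far for a registered, subscribed view.
	rows, err := view.RetrieveData("example.com/views/open_conns_count")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rows {
		log.Println(r)
	}
}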
@ -0,0 +1,186 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package view

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/internal"
	"go.opencensus.io/tag"
)

type command interface {
	handleCommand(w *worker)
}

// getViewByNameReq is the command to get a view given its name.
type getViewByNameReq struct {
	name string
	c    chan *getViewByNameResp
}

type getViewByNameResp struct {
	v *View
}

func (cmd *getViewByNameReq) handleCommand(w *worker) {
	v := w.views[cmd.name]
	if v == nil {
		cmd.c <- &getViewByNameResp{nil}
		return
	}
	cmd.c <- &getViewByNameResp{v.view}
}

// registerViewReq is the command to register a view.
type registerViewReq struct {
	views []*View
	err   chan error
}

func (cmd *registerViewReq) handleCommand(w *worker) {
	for _, v := range cmd.views {
		if err := v.canonicalize(); err != nil {
			cmd.err <- err
			return
		}
	}
	var errstr []string
	for _, view := range cmd.views {
		vi, err := w.tryRegisterView(view)
		if err != nil {
			errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
			continue
		}
		internal.SubscriptionReporter(view.Measure.Name())
		vi.subscribe()
	}
	if len(errstr) > 0 {
		cmd.err <- errors.New(strings.Join(errstr, "\n"))
	} else {
		cmd.err <- nil
	}
}

// unregisterFromViewReq is the command to unregister from a view. Has no
// impact on the data collection for clients that are pulling data from the
// library.
type unregisterFromViewReq struct {
	views []string
	done  chan struct{}
}

func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
	for _, name := range cmd.views {
		vi, ok := w.views[name]
		if !ok {
			continue
		}

		// Report pending data for this view before removing it.
		w.reportView(vi, time.Now())

		vi.unsubscribe()
		if !vi.isSubscribed() {
			// this was the last subscription and view is not collecting anymore.
			// The collected data can be cleared.
			vi.clearRows()
		}
		w.unregisterView(name)
	}
	cmd.done <- struct{}{}
}

// retrieveDataReq is the command to retrieve data for a view.
type retrieveDataReq struct {
	now time.Time
	v   string
	c   chan *retrieveDataResp
}

type retrieveDataResp struct {
	rows []*Row
	err  error
}

func (cmd *retrieveDataReq) handleCommand(w *worker) {
	w.mu.Lock()
	defer w.mu.Unlock()
	vi, ok := w.views[cmd.v]
	if !ok {
		cmd.c <- &retrieveDataResp{
			nil,
			fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
		}
		return
	}

	if !vi.isSubscribed() {
		cmd.c <- &retrieveDataResp{
			nil,
			fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
		}
		return
	}
	cmd.c <- &retrieveDataResp{
		vi.collectedRows(),
		nil,
	}
}

// recordReq is the command to record data related to multiple measures
// at once.
type recordReq struct {
	tm          *tag.Map
	ms          []stats.Measurement
	attachments map[string]interface{}
	t           time.Time
}

func (cmd *recordReq) handleCommand(w *worker) {
	w.mu.Lock()
	defer w.mu.Unlock()
	for _, m := range cmd.ms {
		if (m == stats.Measurement{}) { // not registered
			continue
		}
		ref := w.getMeasureRef(m.Measure().Name())
		for v := range ref.views {
			v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now())
		}
	}
}

// setReportingPeriodReq is the command to modify the duration between
// reporting the collected data to the registered clients.
type setReportingPeriodReq struct {
	d time.Duration
	c chan bool
}

func (cmd *setReportingPeriodReq) handleCommand(w *worker) {
	w.timer.Stop()
	if cmd.d <= 0 {
		w.timer = time.NewTicker(defaultReportingDuration)
	} else {
		w.timer = time.NewTicker(cmd.d)
	}
	cmd.c <- true
}
@ -0,0 +1,43 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package tag

import (
	"context"
)

// FromContext returns the tag map stored in the context.
func FromContext(ctx context.Context) *Map {
	// The returned tag map shouldn't be mutated.
	ts := ctx.Value(mapCtxKey)
	if ts == nil {
		return nil
	}
	return ts.(*Map)
}

// NewContext creates a new context with the given tag map.
// To propagate a tag map to downstream methods and downstream RPCs, add a tag map
// to the current context. NewContext will return a copy of the current context,
// and put the tag map into the returned one.
// If there is already a tag map in the current context, it will be replaced with m.
func NewContext(ctx context.Context, m *Map) context.Context {
	return context.WithValue(ctx, mapCtxKey, m)
}

type ctxKey struct{}

var mapCtxKey = ctxKey{}
@ -0,0 +1,26 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

/*
Package tag contains OpenCensus tags.

Tags are key-value pairs. Tags provide additional cardinality to
the OpenCensus instrumentation data.

Tags can be propagated on the wire and in the same
process via context.Context. Encode and Decode should be
used to convert tags to and from their binary propagation form.
*/
package tag // import "go.opencensus.io/tag"
@ -0,0 +1,44 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package tag

// Key represents a tag key.
type Key struct {
	name string
}

// NewKey creates or retrieves a string key identified by name.
// Calling NewKey more than once with the same name returns the same key.
func NewKey(name string) (Key, error) {
	if !checkKeyName(name) {
		return Key{}, errInvalidKeyName
	}
	return Key{name: name}, nil
}

// MustNewKey returns a key with the given name, and panics if name is an invalid key name.
func MustNewKey(name string) Key {
	k, err := NewKey(name)
	if err != nil {
		panic(err)
	}
	return k
}

// Name returns the name of the key.
func (k Key) Name() string {
	return k.name
}
@ -0,0 +1,229 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package tag

import (
	"bytes"
	"context"
	"fmt"
	"sort"
)

// Tag is a key value pair that can be propagated on wire.
type Tag struct {
	Key   Key
	Value string
}

type tagContent struct {
	value string
	m     metadatas
}

// Map is a map of tags. Use New to create a context containing
// a new Map.
type Map struct {
	m map[Key]tagContent
}

// Value returns the value for the key if a value for the key exists.
func (m *Map) Value(k Key) (string, bool) {
	if m == nil {
		return "", false
	}
	v, ok := m.m[k]
	return v.value, ok
}

func (m *Map) String() string {
	if m == nil {
		return "nil"
	}
	keys := make([]Key, 0, len(m.m))
	for k := range m.m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })

	var buffer bytes.Buffer
	buffer.WriteString("{ ")
	for _, k := range keys {
		buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
	}
	buffer.WriteString(" }")
	return buffer.String()
}

func (m *Map) insert(k Key, v string, md metadatas) {
	if _, ok := m.m[k]; ok {
		return
	}
	m.m[k] = tagContent{value: v, m: md}
}

func (m *Map) update(k Key, v string, md metadatas) {
	if _, ok := m.m[k]; ok {
		m.m[k] = tagContent{value: v, m: md}
	}
}

func (m *Map) upsert(k Key, v string, md metadatas) {
	m.m[k] = tagContent{value: v, m: md}
}

func (m *Map) delete(k Key) {
	delete(m.m, k)
}

func newMap() *Map {
	return &Map{m: make(map[Key]tagContent)}
}

// Mutator modifies a tag map.
type Mutator interface {
	Mutate(t *Map) (*Map, error)
}

// Insert returns a mutator that inserts a
// value associated with k. If k already exists in the tag map,
// the mutator doesn't update the value.
// Metadata applies metadata to the tag. It is optional.
// Metadatas are applied in the order in which they are provided.
// If more than one metadata updates the same attribute then
// the update from the last metadata prevails.
func Insert(k Key, v string, mds ...Metadata) Mutator {
	return &mutator{
		fn: func(m *Map) (*Map, error) {
			if !checkValue(v) {
				return nil, errInvalidValue
			}
			m.insert(k, v, createMetadatas(mds...))
			return m, nil
		},
	}
}

// Update returns a mutator that updates the
// value of the tag associated with k with v. If k doesn't
// exist in the tag map, the mutator doesn't insert the value.
// Metadata applies metadata to the tag. It is optional.
// Metadatas are applied in the order in which they are provided.
// If more than one metadata updates the same attribute then
// the update from the last metadata prevails.
func Update(k Key, v string, mds ...Metadata) Mutator {
	return &mutator{
		fn: func(m *Map) (*Map, error) {
			if !checkValue(v) {
				return nil, errInvalidValue
			}
			m.update(k, v, createMetadatas(mds...))
			return m, nil
		},
	}
}

// Upsert returns a mutator that upserts the
// value of the tag associated with k with v. It inserts the
// value if k doesn't exist already. It mutates the value
// if k already exists.
// Metadata applies metadata to the tag. It is optional.
// Metadatas are applied in the order in which they are provided.
// If more than one metadata updates the same attribute then
// the update from the last metadata prevails.
func Upsert(k Key, v string, mds ...Metadata) Mutator {
	return &mutator{
		fn: func(m *Map) (*Map, error) {
			if !checkValue(v) {
				return nil, errInvalidValue
			}
			m.upsert(k, v, createMetadatas(mds...))
			return m, nil
		},
	}
}

func createMetadatas(mds ...Metadata) metadatas {
	var metas metadatas
	if len(mds) > 0 {
		for _, md := range mds {
			if md != nil {
				md(&metas)
			}
		}
	} else {
		WithTTL(TTLUnlimitedPropagation)(&metas)
	}
	return metas
}

// Delete returns a mutator that deletes
// the value associated with k.
func Delete(k Key) Mutator {
	return &mutator{
		fn: func(m *Map) (*Map, error) {
			m.delete(k)
			return m, nil
		},
	}
}

// New returns a new context that contains a tag map
// originated from the incoming context and modified
// with the provided mutators.
func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
	m := newMap()
	orig := FromContext(ctx)
	if orig != nil {
		for k, v := range orig.m {
			if !checkKeyName(k.Name()) {
				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
			}
			if !checkValue(v.value) {
				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
			}
			m.insert(k, v.value, v.m)
		}
	}
	var err error
	for _, mod := range mutator {
		m, err = mod.Mutate(m)
		if err != nil {
			return ctx, err
		}
	}
	return NewContext(ctx, m), nil
}

// Do is similar to pprof.Do: a convenience for installing the tags
// from the context as Go profiler labels. This allows you to
// correlate runtime profiling with stats.
//
// It converts the key/values from the given map to Go profiler labels
// and calls pprof.Do.
//
// Do is going to do nothing if your Go version is below 1.9.
func Do(ctx context.Context, f func(ctx context.Context)) {
	do(ctx, f)
}

type mutator struct {
	fn func(t *Map) (*Map, error)
}

func (m *mutator) Mutate(t *Map) (*Map, error) {
	return m.fn(t)
}
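A minimal sketch of building a tag map with the mutators above and reading it back from the context; the key name and values are made up:

package main

import (
	"context"
	"log"

	"go.opencensus.io/tag"
)

func main() {
	keyMethod := tag.MustNewKey("method") // panics on an invalid key name

	ctx, err := tag.New(context.Background(),
		tag.Insert(keyMethod, "GET"),  // no-op if the key already has a value
		tag.Upsert(keyMethod, "POST"), // inserts or overwrites unconditionally
	)
	if err != nil {
		log.Fatal(err)
	}

	if v, ok := tag.FromContext(ctx).Value(keyMethod); ok {
		log.Println("method =", v) // prints "method = POST"
	}
}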