generate a single debug file for a long duration capture (#10279)

* debug: remove the CLI check for debug_enabled

The API allows collecting profiles even when debug_enabled=false, as long
as ACLs are enabled. Remove this check from the CLI so that users do not
need to set debug_enabled=true for no reason.

Also:
- fix the API client to return errors on non-200 status codes for debug
  endpoints
- improve the failure messages when pprof data cannot be collected
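
A minimal sketch of the non-200 handling described above, written against a
plain net/http response; the helper name and shape are illustrative, not the
exact api client code:

```go
package debug

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// requireOK turns any non-200 response from a debug endpoint into an
// error carrying the status code and body, instead of silently returning
// empty pprof data.
func requireOK(resp *http.Response) error {
	if resp.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		return fmt.Errorf("unexpected response code: %d (%s)", resp.StatusCode, body)
	}
	return nil
}
```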

Co-Authored-By: Dhia Ayachi <dhia@hashicorp.com>

* remove parallel test runs

parallel runs create a race condition that fails the debug tests

* snapshot the timestamp at the beginning of the capture

- the timestamp used to create the capture sub-folder is snapshotted once at the beginning of the capture and reused for subsequent captures
- captures append to the file if it already exists

* Revert "snapshot the timestamp at the beginning of the capture"

This reverts commit c2d03346

* Refactor captureDynamic to extract the capture logic for each item into a separate func

* extract the wait group outside the goroutine to avoid a race condition
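
A compressed sketch of that race, with an illustrative capture() standing in
for the real work: wg.Add must happen before the goroutine starts, otherwise
wg.Wait can return before Add executes.

```go
package main

import (
	"fmt"
	"sync"
)

func capture() { fmt.Println("captured") }

func main() {
	var wg sync.WaitGroup

	// Buggy variant (what the bullet fixes): calling wg.Add(1) inside the
	// goroutine races with wg.Wait below, which may return immediately.
	//
	// go func() {
	// 	wg.Add(1)
	// 	defer wg.Done()
	// 	capture()
	// }()

	// Fixed: Add happens-before both the goroutine and the Wait.
	wg.Add(1)
	go func() {
		defer wg.Done()
		capture()
	}()
	wg.Wait()
}
```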

* capture pprof in a separate goroutine

* perform a single capture for pprof data for the whole duration
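
A sketch of the whole-duration capture, using the client.Debug().Profile call
from the diff below; the wrapper name captureFullProfile is hypothetical:

```go
package debug

import (
	"time"

	"github.com/hashicorp/consul/api"
)

// captureFullProfile performs one pprof capture sized to the full command
// duration instead of one capture per interval. The pprof endpoints take
// whole seconds and need at least 1s, so the value is floored there.
func captureFullProfile(client *api.Client, duration time.Duration) ([]byte, error) {
	s := duration.Seconds()
	if s < 1 {
		s = 1
	}
	return client.Debug().Profile(int(s))
}
```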

* add missing vendor dependency

* add a changelog entry and fix the documentation to reflect the change

* create function for timestamp dir creation and simplify error handling

* use error groups and ticker to simplify interval capture loop

* Logs, profiles, and traces are captured for the full duration; metrics, heap, and goroutine dumps are captured at every interval
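
A condensed, runnable sketch of that split; the capture bodies here are
stand-ins, while the real targets appear in the debug.go diff below:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	duration := 2 * time.Second
	interval := 500 * time.Millisecond

	g := new(errgroup.Group)

	// Logs, profile, and trace: one capture spanning the whole duration.
	g.Go(func() error {
		fmt.Println("long-running capture for", duration)
		time.Sleep(duration) // stands in for Monitor/Profile/Trace calls
		return nil
	})

	// Metrics, heap, and goroutine dumps: one capture per tick until the
	// duration elapses.
	g.Go(func() error {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		deadline := time.After(duration)
		for {
			select {
			case <-ticker.C:
				fmt.Println("interval capture")
			case <-deadline:
				return nil
			}
		}
	})

	if err := g.Wait(); err != nil {
		fmt.Println("capture failed:", err)
	}
}
```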

* refactor the logs capture routine and add a log-capture-specific test

* improve error reporting when the log test fails

* change test duration to 1s

* make time parsing in log lines more robust

* move the log time format into a const
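
A small sketch of the const-based timestamp check; logTimeFormat matches the
test helper added below, while hasValidTimestamp is an illustrative name:

```go
package main

import (
	"fmt"
	"time"
)

// logTimeFormat is the reference layout for the timestamp that prefixes
// each agent log line, e.g. "2021-06-07T13:00:51.123 [INFO] agent: ...".
const logTimeFormat = "2006-01-02T15:04:05.000"

// hasValidTimestamp reports whether a log line starts with a parseable
// timestamp, guarding against lines shorter than the layout.
func hasValidTimestamp(line string) bool {
	if len(line) < len(logTimeFormat) {
		return false
	}
	_, err := time.Parse(logTimeFormat, line[:len(logTimeFormat)])
	return err == nil
}

func main() {
	fmt.Println(hasValidTimestamp("2021-06-07T13:00:51.123 [INFO] agent: started")) // true
	fmt.Println(hasValidTimestamp("not a log line"))                                // false
}
```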

* check for an empty log line as early as possible and return

Co-authored-by: Freddy <freddygv@users.noreply.github.com>

* rename function to captureShortLived

* more specific changelog

Co-authored-by: Paul Banks <banks@banksco.de>

* update documentation to reflect current implementation

* add test for behavior when invalid param is passed to the command

* fix argument line in test

* a more detailed description of the new behaviour

Co-authored-by: Paul Banks <banks@banksco.de>

* print success right after the capture is done

* remove an unnecessary error check

Co-authored-by: Daniel Nephin <dnephin@hashicorp.com>

* upgraded github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 => v0.0.0-20210601050228-01bbb1931b22

Co-authored-by: Daniel Nephin <dnephin@hashicorp.com>
Co-authored-by: Freddy <freddygv@users.noreply.github.com>
Co-authored-by: Paul Banks <banks@banksco.de>
Dhia Ayachi authored 2021-06-07 13:00:51 -04:00, committed by GitHub
parent f2c2809612
commit e3dd0f9a44
19 changed files with 4865 additions and 210 deletions

.changelog/10279.txt (new file, 3 additions)

@ -0,0 +1,3 @@
```release-note:improvement
debug: capture a single stream of logs, and single pprof profile and trace for the whole duration
```

command/debug/debug.go

@ -7,15 +7,15 @@ import (
"errors"
"flag"
"fmt"
"golang.org/x/sync/errgroup"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/api"
@ -193,7 +193,10 @@ func (c *cmd) Run(args []string) int {
// Capture dynamic information from the target agent, blocking for duration
if c.configuredTarget("metrics") || c.configuredTarget("logs") || c.configuredTarget("pprof") {
err = c.captureDynamic()
g := new(errgroup.Group)
g.Go(c.captureInterval)
g.Go(c.captureLongRunning)
err = g.Wait()
if err != nil {
c.UI.Error(fmt.Sprintf("Error encountered during collection: %v", err))
}
@ -283,8 +286,8 @@ func (c *cmd) prepare() (version string, err error) {
// to the output path
func (c *cmd) captureStatic() error {
// Collect errors via multierror as we want to gracefully
// fail if an API is inacessible
var errors error
// fail if an API is inaccessible
var errs error
// Collect the named outputs here
outputs := make(map[string]interface{})
@ -293,7 +296,7 @@ func (c *cmd) captureStatic() error {
if c.configuredTarget("host") {
host, err := c.client.Agent().Host()
if err != nil {
errors = multierror.Append(errors, err)
errs = multierror.Append(errs, err)
}
outputs["host"] = host
}
@ -302,7 +305,7 @@ func (c *cmd) captureStatic() error {
if c.configuredTarget("agent") {
agent, err := c.client.Agent().Self()
if err != nil {
errors = multierror.Append(errors, err)
errs = multierror.Append(errs, err)
}
outputs["agent"] = agent
}
@ -311,7 +314,7 @@ func (c *cmd) captureStatic() error {
if c.configuredTarget("cluster") {
members, err := c.client.Agent().Members(true)
if err != nil {
errors = multierror.Append(errors, err)
errs = multierror.Append(errs, err)
}
outputs["cluster"] = members
}
@ -320,205 +323,45 @@ func (c *cmd) captureStatic() error {
for output, v := range outputs {
marshaled, err := json.MarshalIndent(v, "", "\t")
if err != nil {
errors = multierror.Append(errors, err)
errs = multierror.Append(errs, err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/%s.json", c.output, output), marshaled, 0644)
if err != nil {
errors = multierror.Append(errors, err)
errs = multierror.Append(errs, err)
}
}
return errors
return errs
}
// captureDynamic blocks for the duration of the command
// captureInterval blocks for the duration of the command
// specified by the duration flag, capturing the dynamic
// targets at the interval specified
func (c *cmd) captureDynamic() error {
successChan := make(chan int64)
errCh := make(chan error)
func (c *cmd) captureInterval() error {
intervalChn := time.NewTicker(c.interval)
defer intervalChn.Stop()
durationChn := time.After(c.duration)
intervalCount := 0
c.UI.Output(fmt.Sprintf("Beginning capture interval %s (%d)", time.Now().Local().String(), intervalCount))
// We'll wait for all of the targets configured to be
// captured before continuing
var wg sync.WaitGroup
capture := func() {
timestamp := time.Now().Local().Unix()
// Make the directory that will store all captured data
// for this interval
timestampDir := fmt.Sprintf("%s/%d", c.output, timestamp)
err := os.MkdirAll(timestampDir, 0755)
if err != nil {
errCh <- err
}
// Capture metrics
if c.configuredTarget("metrics") {
wg.Add(1)
go func() {
metrics, err := c.client.Agent().Metrics()
if err != nil {
errCh <- err
}
marshaled, err := json.MarshalIndent(metrics, "", "\t")
if err != nil {
errCh <- err
}
err = ioutil.WriteFile(fmt.Sprintf("%s/%s.json", timestampDir, "metrics"), marshaled, 0644)
if err != nil {
errCh <- err
}
// We need to sleep for the configured interval in the case
// of metrics being the only target captured. When it is,
// the waitgroup would return on Wait() and repeat without
// waiting for the interval.
time.Sleep(c.interval)
wg.Done()
}()
}
// Capture pprof
if c.configuredTarget("pprof") {
wg.Add(1)
go func() {
// We need to capture profiles and traces at the same time
// and block for both of them
var wgProf sync.WaitGroup
heap, err := c.client.Debug().Heap()
if err != nil {
errCh <- fmt.Errorf("failed to collect heap profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/heap.prof", timestampDir), heap, 0644)
if err != nil {
errCh <- err
}
// Capture a profile/trace with a minimum of 1s
s := c.interval.Seconds()
if s < 1 {
s = 1
}
wgProf.Add(1)
go func() {
prof, err := c.client.Debug().Profile(int(s))
if err != nil {
errCh <- fmt.Errorf("failed to collect cpu profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/profile.prof", timestampDir), prof, 0644)
if err != nil {
errCh <- err
}
wgProf.Done()
}()
wgProf.Add(1)
go func() {
trace, err := c.client.Debug().Trace(int(s))
if err != nil {
errCh <- fmt.Errorf("failed to collect trace: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/trace.out", timestampDir), trace, 0644)
if err != nil {
errCh <- err
}
wgProf.Done()
}()
gr, err := c.client.Debug().Goroutine()
if err != nil {
errCh <- fmt.Errorf("failed to collect goroutine profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/goroutine.prof", timestampDir), gr, 0644)
if err != nil {
errCh <- err
}
wgProf.Wait()
wg.Done()
}()
}
// Capture logs
if c.configuredTarget("logs") {
wg.Add(1)
go func() {
endLogChn := make(chan struct{})
logCh, err := c.client.Agent().Monitor("DEBUG", endLogChn, nil)
if err != nil {
errCh <- err
}
// Close the log stream
defer close(endLogChn)
// Create the log file for writing
f, err := os.Create(fmt.Sprintf("%s/%s", timestampDir, "consul.log"))
if err != nil {
errCh <- err
}
defer f.Close()
intervalChn := time.After(c.interval)
OUTER:
for {
select {
case log := <-logCh:
// Append the line to the file
if _, err = f.WriteString(log + "\n"); err != nil {
errCh <- err
break OUTER
}
// Stop collecting the logs after the interval specified
case <-intervalChn:
break OUTER
}
}
wg.Done()
}()
}
// Wait for all captures to complete
wg.Wait()
// Send down the timestamp for UI output
successChan <- timestamp
err := captureShortLived(c)
if err != nil {
return err
}
go capture()
c.UI.Output(fmt.Sprintf("Capture successful %s (%d)", time.Now().Local().String(), intervalCount))
for {
select {
case t := <-successChan:
case t := <-intervalChn.C:
intervalCount++
c.UI.Output(fmt.Sprintf("Capture successful %s (%d)", time.Unix(t, 0).Local().String(), intervalCount))
go capture()
case e := <-errCh:
c.UI.Error(fmt.Sprintf("Capture failure: %s", e))
err := captureShortLived(c)
if err != nil {
return err
}
c.UI.Output(fmt.Sprintf("Capture successful %s (%d)", t.Local().String(), intervalCount))
case <-durationChn:
intervalChn.Stop()
return nil
case <-c.shutdownCh:
return errors.New("stopping collection due to shutdown signal")
@ -526,6 +369,168 @@ func (c *cmd) captureDynamic() error {
}
}
func captureShortLived(c *cmd) error {
g := new(errgroup.Group)
timestamp := time.Now().Local().Unix()
timestampDir, err := c.createTimestampDir(timestamp)
if err != nil {
return err
}
if c.configuredTarget("pprof") {
g.Go(func() error {
return c.captureHeap(timestampDir)
})
g.Go(func() error {
return c.captureGoRoutines(timestampDir)
})
}
// Capture metrics
if c.configuredTarget("metrics") {
g.Go(func() error {
return c.captureMetrics(timestampDir)
})
}
return g.Wait()
}
func (c *cmd) createTimestampDir(timestamp int64) (string, error) {
// Make the directory that will store all captured data
// for this interval
timestampDir := fmt.Sprintf("%s/%d", c.output, timestamp)
err := os.MkdirAll(timestampDir, 0755)
if err != nil {
return "", err
}
return timestampDir, nil
}
func (c *cmd) captureLongRunning() error {
timestamp := time.Now().Local().Unix()
timestampDir, err := c.createTimestampDir(timestamp)
if err != nil {
return err
}
g := new(errgroup.Group)
// Capture a profile/trace with a minimum of 1s
s := c.duration.Seconds()
if s < 1 {
s = 1
}
// Capture pprof
if c.configuredTarget("pprof") {
g.Go(func() error {
return c.captureProfile(s, timestampDir)
})
g.Go(func() error {
return c.captureTrace(s, timestampDir)
})
}
// Capture logs
if c.configuredTarget("logs") {
g.Go(func() error {
return c.captureLogs(timestampDir)
})
}
return g.Wait()
}
func (c *cmd) captureGoRoutines(timestampDir string) error {
gr, err := c.client.Debug().Goroutine()
if err != nil {
return fmt.Errorf("failed to collect goroutine profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/goroutine.prof", timestampDir), gr, 0644)
return err
}
func (c *cmd) captureTrace(s float64, timestampDir string) error {
trace, err := c.client.Debug().Trace(int(s))
if err != nil {
return fmt.Errorf("failed to collect trace: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/trace.out", timestampDir), trace, 0644)
return err
}
func (c *cmd) captureProfile(s float64, timestampDir string) error {
prof, err := c.client.Debug().Profile(int(s))
if err != nil {
return fmt.Errorf("failed to collect cpu profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/profile.prof", timestampDir), prof, 0644)
return err
}
func (c *cmd) captureHeap(timestampDir string) error {
heap, err := c.client.Debug().Heap()
if err != nil {
return fmt.Errorf("failed to collect heap profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/heap.prof", timestampDir), heap, 0644)
return err
}
func (c *cmd) captureLogs(timestampDir string) error {
endLogChn := make(chan struct{})
timeIsUp := time.After(c.duration)
logCh, err := c.client.Agent().Monitor("DEBUG", endLogChn, nil)
if err != nil {
return err
}
// Close the log stream
defer close(endLogChn)
// Create the log file for writing
f, err := os.Create(fmt.Sprintf("%s/%s", timestampDir, "consul.log"))
if err != nil {
return err
}
defer f.Close()
for {
select {
case log := <-logCh:
if log == "" {
return nil
}
if _, err = f.WriteString(log + "\n"); err != nil {
return err
}
case <-timeIsUp:
return nil
}
}
}
func (c *cmd) captureMetrics(timestampDir string) error {
metrics, err := c.client.Agent().Metrics()
if err != nil {
return err
}
marshaled, err := json.MarshalIndent(metrics, "", "\t")
if err != nil {
return err
}
err = ioutil.WriteFile(fmt.Sprintf("%s/%s.json", timestampDir, "metrics"), marshaled, 0644)
return err
}
// allowedTarget returns a boolean if the target is able to be captured
func (c *cmd) allowedTarget(target string) bool {
for _, dt := range c.defaultTargets() {
@ -635,9 +640,7 @@ func (c *cmd) createArchiveTemp(path string) (tempName string, err error) {
return fmt.Errorf("failed to copy files for archive: %s", err)
}
f.Close()
return nil
return f.Close()
})
if err != nil {

command/debug/debug_test.go

@ -2,18 +2,22 @@ package debug
import (
"archive/tar"
"bufio"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/google/pprof/profile"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/testrpc"
@ -148,6 +152,29 @@ func TestDebugCommand_ArgsBad(t *testing.T) {
}
}
func TestDebugCommand_InvalidFlags(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd.validateTiming = false
outputPath := ""
args := []string{
"-invalid=value",
"-output=" + outputPath,
"-duration=100ms",
"-interval=50ms",
}
if code := cmd.Run(args); code == 0 {
t.Fatalf("should exit non-zero, got code: %d", code)
}
errOutput := ui.ErrorWriter.String()
if !strings.Contains(errOutput, "==> Error parsing flags: flag provided but not defined:") {
t.Errorf("expected error output, got %q", errOutput)
}
}
func TestDebugCommand_OutputPathBad(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -330,6 +357,116 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
}
}
func TestDebugCommand_CaptureLogs(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
cases := map[string]struct {
// used in -target param
targets []string
// existence verified after execution
files []string
// non-existence verified after execution
excludedFiles []string
}{
"logs-only": {
[]string{"logs"},
[]string{"*/consul.log"},
[]string{"agent.json", "host.json", "cluster.json", "*/metrics.json"},
},
}
for name, tc := range cases {
testDir := testutil.TempDir(t, "debug")
a := agent.NewTestAgent(t, `
enable_debug = true
`)
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug-%s", testDir, name)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-output=" + outputPath,
"-archive=false",
"-duration=1000ms",
"-interval=50ms",
}
for _, t := range tc.targets {
args = append(args, "-capture="+t)
}
if code := cmd.Run(args); code != 0 {
t.Fatalf("should exit 0, got code: %d", code)
}
errOutput := ui.ErrorWriter.String()
if errOutput != "" {
t.Errorf("expected no error output, got %q", errOutput)
}
// Ensure the debug data was written
_, err := os.Stat(outputPath)
if err != nil {
t.Fatalf("output path should exist: %s", err)
}
// Ensure the captured static files exist
for _, f := range tc.files {
path := fmt.Sprintf("%s/%s", outputPath, f)
// Glob ignores file system errors
fs, _ := filepath.Glob(path)
if len(fs) <= 0 {
t.Fatalf("%s: output data should exist for %s", name, f)
}
for _, logFile := range fs {
content, err := ioutil.ReadFile(logFile)
require.NoError(t, err)
scanner := bufio.NewScanner(strings.NewReader(string(content)))
for scanner.Scan() {
logLine := scanner.Text()
if !validateLogLine([]byte(logLine)) {
t.Fatalf("%s: log line is not valid %s", name, logLine)
}
}
}
}
// Ensure any excluded files do not exist
for _, f := range tc.excludedFiles {
path := fmt.Sprintf("%s/%s", outputPath, f)
// Glob ignores file system errors
fs, _ := filepath.Glob(path)
if len(fs) > 0 {
t.Fatalf("%s: output data should not exist for %s", name, f)
}
}
}
}
func validateLogLine(content []byte) bool {
fields := strings.SplitN(string(content), " ", 2)
if len(fields) != 2 {
return false
}
const logTimeFormat = "2006-01-02T15:04:05.000"
t := content[:len(logTimeFormat)]
_, err := time.Parse(logTimeFormat, string(t))
if err != nil {
return false
}
re := regexp.MustCompile(`(\[(ERROR|WARN|INFO|DEBUG|TRACE)]) (.*?): (.*)`)
valid := re.Match([]byte(fields[1]))
return valid
}
func TestDebugCommand_ProfilesExist(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -354,7 +491,7 @@ func TestDebugCommand_ProfilesExist(t *testing.T) {
"-output=" + outputPath,
// CPU profile has a minimum of 1s
"-archive=false",
"-duration=1s",
"-duration=2s",
"-interval=1s",
"-capture=pprof",
}
@ -367,9 +504,17 @@ func TestDebugCommand_ProfilesExist(t *testing.T) {
// Glob ignores file system errors
for _, v := range profiles {
fs, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
if len(fs) == 0 {
if len(fs) < 1 {
t.Errorf("output data should exist for %s", v)
}
for _, f := range fs {
if !strings.Contains(f, "trace.out") {
content, err := ioutil.ReadFile(f)
require.NoError(t, err)
_, err = profile.ParseData(content)
require.NoError(t, err)
}
}
}
errOutput := ui.ErrorWriter.String()
@ -476,17 +621,11 @@ func TestDebugCommand_DebugDisabled(t *testing.T) {
profiles := []string{"heap.prof", "profile.prof", "goroutine.prof", "trace.out"}
// Glob ignores file system errors
for _, v := range profiles {
fs, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
// TODO: make this always one
require.True(t, len(fs) >= 1)
content, err := ioutil.ReadFile(fs[0])
require.NoError(t, err)
require.Len(t, content, 0)
fs, _ := filepath.Glob(fmt.Sprintf("%s/*r/%s", outputPath, v))
require.True(t, len(fs) == 0)
}
errOutput := ui.ErrorWriter.String()
for _, prof := range []string{"heap", "cpu", "goroutine", "trace"} {
expected := fmt.Sprintf("failed to collect %v", prof)
require.Contains(t, errOutput, expected)
}
require.Contains(t, errOutput, "failed to collect")
}

go.mod (1 addition)

@ -27,6 +27,7 @@ require (
github.com/google/go-cmp v0.5.2
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/gofuzz v1.2.0
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2
github.com/hashicorp/consul/api v1.8.0
github.com/hashicorp/consul/sdk v0.7.0

go.sum (7 additions)

@ -80,6 +80,9 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
@ -193,6 +196,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 h1:ub2sxhs2A0HRa2dWHavvmWxiVGXNfE9wI+gcTMwED8A=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 h1:AtvtonGEH/fZK0XPNNBdB6swgy7Iudfx88wzyIpwqJ8=
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -298,6 +303,7 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe
github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo=
github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@ -601,6 +607,7 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

vendor/github.com/google/pprof/AUTHORS (generated, vendored, new file, 7 additions)

@ -0,0 +1,7 @@
# This is the official list of pprof authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Google Inc.

vendor/github.com/google/pprof/CONTRIBUTORS (generated, vendored, new file, 16 additions)

@ -0,0 +1,16 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
Raul Silvera <rsilvera@google.com>
Tipp Moseley <tipp@google.com>
Hyoun Kyu Cho <netforce@google.com>
Martin Spier <spiermar@gmail.com>
Taco de Wolff <tacodewolff@gmail.com>
Andrew Hunter <andrewhhunter@gmail.com>

vendor/github.com/google/pprof/LICENSE (generated, vendored, new file, 202 additions)

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/google/pprof/profile/encode.go (generated, vendored, new file, 567 additions)

@ -0,0 +1,567 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profile
import (
"errors"
"sort"
)
func (p *Profile) decoder() []decoder {
return profileDecoder
}
// preEncode populates the unexported fields to be used by encode
// (with suffix X) from the corresponding exported fields. The
// exported fields are cleared up to facilitate testing.
func (p *Profile) preEncode() {
strings := make(map[string]int)
addString(strings, "")
for _, st := range p.SampleType {
st.typeX = addString(strings, st.Type)
st.unitX = addString(strings, st.Unit)
}
for _, s := range p.Sample {
s.labelX = nil
var keys []string
for k := range s.Label {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := s.Label[k]
for _, v := range vs {
s.labelX = append(s.labelX,
label{
keyX: addString(strings, k),
strX: addString(strings, v),
},
)
}
}
var numKeys []string
for k := range s.NumLabel {
numKeys = append(numKeys, k)
}
sort.Strings(numKeys)
for _, k := range numKeys {
keyX := addString(strings, k)
vs := s.NumLabel[k]
units := s.NumUnit[k]
for i, v := range vs {
var unitX int64
if len(units) != 0 {
unitX = addString(strings, units[i])
}
s.labelX = append(s.labelX,
label{
keyX: keyX,
numX: v,
unitX: unitX,
},
)
}
}
s.locationIDX = make([]uint64, len(s.Location))
for i, loc := range s.Location {
s.locationIDX[i] = loc.ID
}
}
for _, m := range p.Mapping {
m.fileX = addString(strings, m.File)
m.buildIDX = addString(strings, m.BuildID)
}
for _, l := range p.Location {
for i, ln := range l.Line {
if ln.Function != nil {
l.Line[i].functionIDX = ln.Function.ID
} else {
l.Line[i].functionIDX = 0
}
}
if l.Mapping != nil {
l.mappingIDX = l.Mapping.ID
} else {
l.mappingIDX = 0
}
}
for _, f := range p.Function {
f.nameX = addString(strings, f.Name)
f.systemNameX = addString(strings, f.SystemName)
f.filenameX = addString(strings, f.Filename)
}
p.dropFramesX = addString(strings, p.DropFrames)
p.keepFramesX = addString(strings, p.KeepFrames)
if pt := p.PeriodType; pt != nil {
pt.typeX = addString(strings, pt.Type)
pt.unitX = addString(strings, pt.Unit)
}
p.commentX = nil
for _, c := range p.Comments {
p.commentX = append(p.commentX, addString(strings, c))
}
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
p.stringTable = make([]string, len(strings))
for s, i := range strings {
p.stringTable[i] = s
}
}
func (p *Profile) encode(b *buffer) {
for _, x := range p.SampleType {
encodeMessage(b, 1, x)
}
for _, x := range p.Sample {
encodeMessage(b, 2, x)
}
for _, x := range p.Mapping {
encodeMessage(b, 3, x)
}
for _, x := range p.Location {
encodeMessage(b, 4, x)
}
for _, x := range p.Function {
encodeMessage(b, 5, x)
}
encodeStrings(b, 6, p.stringTable)
encodeInt64Opt(b, 7, p.dropFramesX)
encodeInt64Opt(b, 8, p.keepFramesX)
encodeInt64Opt(b, 9, p.TimeNanos)
encodeInt64Opt(b, 10, p.DurationNanos)
if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
encodeMessage(b, 11, p.PeriodType)
}
encodeInt64Opt(b, 12, p.Period)
encodeInt64s(b, 13, p.commentX)
encodeInt64(b, 14, p.defaultSampleTypeX)
}
var profileDecoder = []decoder{
nil, // 0
// repeated ValueType sample_type = 1
func(b *buffer, m message) error {
x := new(ValueType)
pp := m.(*Profile)
pp.SampleType = append(pp.SampleType, x)
return decodeMessage(b, x)
},
// repeated Sample sample = 2
func(b *buffer, m message) error {
x := new(Sample)
pp := m.(*Profile)
pp.Sample = append(pp.Sample, x)
return decodeMessage(b, x)
},
// repeated Mapping mapping = 3
func(b *buffer, m message) error {
x := new(Mapping)
pp := m.(*Profile)
pp.Mapping = append(pp.Mapping, x)
return decodeMessage(b, x)
},
// repeated Location location = 4
func(b *buffer, m message) error {
x := new(Location)
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
pp := m.(*Profile)
pp.Location = append(pp.Location, x)
err := decodeMessage(b, x)
var tmp []Line
x.Line = append(tmp, x.Line...) // Shrink to allocated size
return err
},
// repeated Function function = 5
func(b *buffer, m message) error {
x := new(Function)
pp := m.(*Profile)
pp.Function = append(pp.Function, x)
return decodeMessage(b, x)
},
// repeated string string_table = 6
func(b *buffer, m message) error {
err := decodeStrings(b, &m.(*Profile).stringTable)
if err != nil {
return err
}
if m.(*Profile).stringTable[0] != "" {
return errors.New("string_table[0] must be ''")
}
return nil
},
// int64 drop_frames = 7
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
// int64 keep_frames = 8
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
// int64 time_nanos = 9
func(b *buffer, m message) error {
if m.(*Profile).TimeNanos != 0 {
return errConcatProfile
}
return decodeInt64(b, &m.(*Profile).TimeNanos)
},
// int64 duration_nanos = 10
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
// ValueType period_type = 11
func(b *buffer, m message) error {
x := new(ValueType)
pp := m.(*Profile)
pp.PeriodType = x
return decodeMessage(b, x)
},
// int64 period = 12
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
// repeated int64 comment = 13
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
// int64 defaultSampleType = 14
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
}
// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
func (p *Profile) postDecode() error {
var err error
mappings := make(map[uint64]*Mapping, len(p.Mapping))
mappingIds := make([]*Mapping, len(p.Mapping)+1)
for _, m := range p.Mapping {
m.File, err = getString(p.stringTable, &m.fileX, err)
m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
if m.ID < uint64(len(mappingIds)) {
mappingIds[m.ID] = m
} else {
mappings[m.ID] = m
}
}
functions := make(map[uint64]*Function, len(p.Function))
functionIds := make([]*Function, len(p.Function)+1)
for _, f := range p.Function {
f.Name, err = getString(p.stringTable, &f.nameX, err)
f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
f.Filename, err = getString(p.stringTable, &f.filenameX, err)
if f.ID < uint64(len(functionIds)) {
functionIds[f.ID] = f
} else {
functions[f.ID] = f
}
}
locations := make(map[uint64]*Location, len(p.Location))
locationIds := make([]*Location, len(p.Location)+1)
for _, l := range p.Location {
if id := l.mappingIDX; id < uint64(len(mappingIds)) {
l.Mapping = mappingIds[id]
} else {
l.Mapping = mappings[id]
}
l.mappingIDX = 0
for i, ln := range l.Line {
if id := ln.functionIDX; id != 0 {
l.Line[i].functionIDX = 0
if id < uint64(len(functionIds)) {
l.Line[i].Function = functionIds[id]
} else {
l.Line[i].Function = functions[id]
}
}
}
if l.ID < uint64(len(locationIds)) {
locationIds[l.ID] = l
} else {
locations[l.ID] = l
}
}
for _, st := range p.SampleType {
st.Type, err = getString(p.stringTable, &st.typeX, err)
st.Unit, err = getString(p.stringTable, &st.unitX, err)
}
for _, s := range p.Sample {
labels := make(map[string][]string, len(s.labelX))
numLabels := make(map[string][]int64, len(s.labelX))
numUnits := make(map[string][]string, len(s.labelX))
for _, l := range s.labelX {
var key, value string
key, err = getString(p.stringTable, &l.keyX, err)
if l.strX != 0 {
value, err = getString(p.stringTable, &l.strX, err)
labels[key] = append(labels[key], value)
} else if l.numX != 0 || l.unitX != 0 {
numValues := numLabels[key]
units := numUnits[key]
if l.unitX != 0 {
var unit string
unit, err = getString(p.stringTable, &l.unitX, err)
units = padStringArray(units, len(numValues))
numUnits[key] = append(units, unit)
}
numLabels[key] = append(numLabels[key], l.numX)
}
}
if len(labels) > 0 {
s.Label = labels
}
if len(numLabels) > 0 {
s.NumLabel = numLabels
for key, units := range numUnits {
if len(units) > 0 {
numUnits[key] = padStringArray(units, len(numLabels[key]))
}
}
s.NumUnit = numUnits
}
s.Location = make([]*Location, len(s.locationIDX))
for i, lid := range s.locationIDX {
if lid < uint64(len(locationIds)) {
s.Location[i] = locationIds[lid]
} else {
s.Location[i] = locations[lid]
}
}
s.locationIDX = nil
}
p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
if pt := p.PeriodType; pt == nil {
p.PeriodType = &ValueType{}
}
if pt := p.PeriodType; pt != nil {
pt.Type, err = getString(p.stringTable, &pt.typeX, err)
pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
}
for _, i := range p.commentX {
var c string
c, err = getString(p.stringTable, &i, err)
p.Comments = append(p.Comments, c)
}
p.commentX = nil
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
p.stringTable = nil
return err
}
// padStringArray pads arr with enough empty strings to make arr
// length l when arr's length is less than l.
func padStringArray(arr []string, l int) []string {
if l <= len(arr) {
return arr
}
return append(arr, make([]string, l-len(arr))...)
}
func (p *ValueType) decoder() []decoder {
return valueTypeDecoder
}
func (p *ValueType) encode(b *buffer) {
encodeInt64Opt(b, 1, p.typeX)
encodeInt64Opt(b, 2, p.unitX)
}
var valueTypeDecoder = []decoder{
nil, // 0
// optional int64 type = 1
func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
// optional int64 unit = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}
func (p *Sample) decoder() []decoder {
return sampleDecoder
}
func (p *Sample) encode(b *buffer) {
encodeUint64s(b, 1, p.locationIDX)
encodeInt64s(b, 2, p.Value)
for _, x := range p.labelX {
encodeMessage(b, 3, x)
}
}
var sampleDecoder = []decoder{
nil, // 0
// repeated uint64 location = 1
func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
// repeated int64 value = 2
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
// repeated Label label = 3
func(b *buffer, m message) error {
s := m.(*Sample)
n := len(s.labelX)
s.labelX = append(s.labelX, label{})
return decodeMessage(b, &s.labelX[n])
},
}
func (p label) decoder() []decoder {
return labelDecoder
}
func (p label) encode(b *buffer) {
encodeInt64Opt(b, 1, p.keyX)
encodeInt64Opt(b, 2, p.strX)
encodeInt64Opt(b, 3, p.numX)
encodeInt64Opt(b, 4, p.unitX)
}
var labelDecoder = []decoder{
nil, // 0
// optional int64 key = 1
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
// optional int64 str = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
// optional int64 num = 3
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
// optional int64 num = 4
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
}
func (p *Mapping) decoder() []decoder {
return mappingDecoder
}
func (p *Mapping) encode(b *buffer) {
encodeUint64Opt(b, 1, p.ID)
encodeUint64Opt(b, 2, p.Start)
encodeUint64Opt(b, 3, p.Limit)
encodeUint64Opt(b, 4, p.Offset)
encodeInt64Opt(b, 5, p.fileX)
encodeInt64Opt(b, 6, p.buildIDX)
encodeBoolOpt(b, 7, p.HasFunctions)
encodeBoolOpt(b, 8, p.HasFilenames)
encodeBoolOpt(b, 9, p.HasLineNumbers)
encodeBoolOpt(b, 10, p.HasInlineFrames)
}
var mappingDecoder = []decoder{
nil, // 0
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}
func (p *Location) decoder() []decoder {
return locationDecoder
}
func (p *Location) encode(b *buffer) {
encodeUint64Opt(b, 1, p.ID)
encodeUint64Opt(b, 2, p.mappingIDX)
encodeUint64Opt(b, 3, p.Address)
for i := range p.Line {
encodeMessage(b, 4, &p.Line[i])
}
encodeBoolOpt(b, 5, p.IsFolded)
}
var locationDecoder = []decoder{
nil, // 0
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
func(b *buffer, m message) error { // repeated Line line = 4
pp := m.(*Location)
n := len(pp.Line)
pp.Line = append(pp.Line, Line{})
return decodeMessage(b, &pp.Line[n])
},
func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
}
func (p *Line) decoder() []decoder {
return lineDecoder
}
func (p *Line) encode(b *buffer) {
encodeUint64Opt(b, 1, p.functionIDX)
encodeInt64Opt(b, 2, p.Line)
}
var lineDecoder = []decoder{
nil, // 0
// optional uint64 function_id = 1
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
// optional int64 line = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}
func (p *Function) decoder() []decoder {
return functionDecoder
}
func (p *Function) encode(b *buffer) {
encodeUint64Opt(b, 1, p.ID)
encodeInt64Opt(b, 2, p.nameX)
encodeInt64Opt(b, 3, p.systemNameX)
encodeInt64Opt(b, 4, p.filenameX)
encodeInt64Opt(b, 5, p.StartLine)
}
var functionDecoder = []decoder{
nil, // 0
// optional uint64 id = 1
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
// optional int64 function_name = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
// optional int64 function_system_name = 3
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
// repeated int64 filename = 4
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
// optional int64 start_line = 5
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
}
func addString(strings map[string]int, s string) int64 {
i, ok := strings[s]
if !ok {
i = len(strings)
strings[s] = i
}
return int64(i)
}
func getString(strings []string, strng *int64, err error) (string, error) {
if err != nil {
return "", err
}
s := int(*strng)
if s < 0 || s >= len(strings) {
return "", errMalformed
}
*strng = 0
return strings[s], nil
}

vendor/github.com/google/pprof/profile/filter.go (generated, vendored, new file, 270 additions)

@ -0,0 +1,270 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profile
// Implements methods to filter samples from profiles.
import "regexp"
// FilterSamplesByName filters the samples in a profile and only keeps
// samples where at least one frame matches focus but none match ignore.
// Returns true is the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
focusOrIgnore := make(map[uint64]bool)
hidden := make(map[uint64]bool)
for _, l := range p.Location {
if ignore != nil && l.matchesName(ignore) {
im = true
focusOrIgnore[l.ID] = false
} else if focus == nil || l.matchesName(focus) {
fm = true
focusOrIgnore[l.ID] = true
}
if hide != nil && l.matchesName(hide) {
hm = true
l.Line = l.unmatchedLines(hide)
if len(l.Line) == 0 {
hidden[l.ID] = true
}
}
if show != nil {
l.Line = l.matchedLines(show)
if len(l.Line) == 0 {
hidden[l.ID] = true
} else {
hnm = true
}
}
}
s := make([]*Sample, 0, len(p.Sample))
for _, sample := range p.Sample {
if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
if len(hidden) > 0 {
var locs []*Location
for _, loc := range sample.Location {
if !hidden[loc.ID] {
locs = append(locs, loc)
}
}
if len(locs) == 0 {
// Remove sample with no locations (by not adding it to s).
continue
}
sample.Location = locs
}
s = append(s, sample)
}
}
p.Sample = s
return
}
// ShowFrom drops all stack frames above the highest matching frame and returns
// whether a match was found. If showFrom is nil it returns false and does not
// modify the profile.
//
// Example: consider a sample with frames [A, B, C, B], where A is the root.
// ShowFrom(nil) returns false and has frames [A, B, C, B].
// ShowFrom(A) returns true and has frames [A, B, C, B].
// ShowFrom(B) returns true and has frames [B, C, B].
// ShowFrom(C) returns true and has frames [C, B].
// ShowFrom(D) returns false and drops the sample because no frames remain.
func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
if showFrom == nil {
return false
}
// showFromLocs stores location IDs that matched ShowFrom.
showFromLocs := make(map[uint64]bool)
// Apply to locations.
for _, loc := range p.Location {
if filterShowFromLocation(loc, showFrom) {
showFromLocs[loc.ID] = true
matched = true
}
}
// For all samples, strip locations after the highest matching one.
s := make([]*Sample, 0, len(p.Sample))
for _, sample := range p.Sample {
for i := len(sample.Location) - 1; i >= 0; i-- {
if showFromLocs[sample.Location[i].ID] {
sample.Location = sample.Location[:i+1]
s = append(s, sample)
break
}
}
}
p.Sample = s
return matched
}
// filterShowFromLocation tests a showFrom regex against a location, removes
// lines after the last match and returns whether a match was found. If the
// mapping is matched, then all lines are kept.
func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
return true
}
if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
loc.Line = loc.Line[:i+1]
return true
}
return false
}
// lastMatchedLineIndex returns the index of the last line that matches a regex,
// or -1 if no match is found.
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
for i := len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil {
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
return i
}
}
}
return -1
}
// FilterTagsByName filters the tags in a profile and only keeps
// tags that match show and not hide.
func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
matchRemove := func(name string) bool {
matchShow := show == nil || show.MatchString(name)
matchHide := hide != nil && hide.MatchString(name)
if matchShow {
sm = true
}
if matchHide {
hm = true
}
return !matchShow || matchHide
}
for _, s := range p.Sample {
for lab := range s.Label {
if matchRemove(lab) {
delete(s.Label, lab)
}
}
for lab := range s.NumLabel {
if matchRemove(lab) {
delete(s.NumLabel, lab)
}
}
}
return
}
// matchesName returns whether the location matches the regular
// expression. It checks any available function names, file names, and
// mapping object filename.
func (loc *Location) matchesName(re *regexp.Regexp) bool {
for _, ln := range loc.Line {
if fn := ln.Function; fn != nil {
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
return true
}
}
}
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
return true
}
return false
}
// unmatchedLines returns the lines in the location that do not match
// the regular expression.
func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
return nil
}
var lines []Line
for _, ln := range loc.Line {
if fn := ln.Function; fn != nil {
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
continue
}
}
lines = append(lines, ln)
}
return lines
}
// matchedLines returns the lines in the location that match
// the regular expression.
func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
return loc.Line
}
var lines []Line
for _, ln := range loc.Line {
if fn := ln.Function; fn != nil {
if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
continue
}
}
lines = append(lines, ln)
}
return lines
}
// focusedAndNotIgnored looks up a slice of ids against a map of
// focused/ignored locations. The map only contains locations that are
// explicitly focused or ignored. Returns whether there is at least
// one focused location but no ignored locations.
func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
var f bool
for _, loc := range locs {
if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
if focus {
// Found focused location. Must keep searching in case there
// is an ignored one as well.
f = true
} else {
// Found ignored location. Can return false right away.
return false
}
}
}
return f
}
// TagMatch selects tags for filtering
type TagMatch func(s *Sample) bool
// FilterSamplesByTag removes all samples from the profile, except
// those that match focus and do not match the ignore regular
// expression.
func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
samples := make([]*Sample, 0, len(p.Sample))
for _, s := range p.Sample {
focused, ignored := true, false
if focus != nil {
focused = focus(s)
}
if ignore != nil {
ignored = ignore(s)
}
fm = fm || focused
im = im || ignored
if focused && !ignored {
samples = append(samples, s)
}
}
p.Sample = samples
return
}
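
Taken together, the filters above mutate a parsed profile in place. A minimal sketch of driving the tag-based sample filter; the file name and the region label key/value are assumptions for the example, while `Parse`, `HasLabel`, and `FilterSamplesByTag` are the package API shown above:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	f, err := os.Open("cpu.pb.gz") // hypothetical profile file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		log.Fatal(err)
	}

	// Keep only samples labeled region=us-east; the key and value are
	// assumptions for this sketch.
	focus := func(s *profile.Sample) bool { return s.HasLabel("region", "us-east") }
	fm, _ := p.FilterSamplesByTag(focus, nil)
	fmt.Printf("focus matched: %v; %d samples kept\n", fm, len(p.Sample))
}
```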

64
vendor/github.com/google/pprof/profile/index.go generated vendored Normal file

@ -0,0 +1,64 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profile
import (
"fmt"
"strconv"
"strings"
)
// SampleIndexByName returns the appropriate index for a value of sample index.
// If numeric, it returns the number, otherwise it looks up the text in the
// profile sample types.
func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
if sampleIndex == "" {
if dst := p.DefaultSampleType; dst != "" {
for i, t := range sampleTypes(p) {
if t == dst {
return i, nil
}
}
}
// By default select the last sample value
return len(p.SampleType) - 1, nil
}
if i, err := strconv.Atoi(sampleIndex); err == nil {
if i < 0 || i >= len(p.SampleType) {
return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
}
return i, nil
}
// Remove the inuse_ prefix to support legacy pprof options
// "inuse_space" and "inuse_objects" for profiles containing types
// "space" and "objects".
noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
for i, t := range p.SampleType {
if t.Type == sampleIndex || t.Type == noInuse {
return i, nil
}
}
return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
}
func sampleTypes(p *Profile) []string {
types := make([]string, len(p.SampleType))
for i, t := range p.SampleType {
types[i] = t.Type
}
return types
}
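
A hedged sketch of how a caller might use `SampleIndexByName` to pick a value column and aggregate it; `totalFor` is an illustrative helper, not part of the package:

```go
package pprofutil

import "github.com/google/pprof/profile"

// totalFor resolves name ("inuse_space", a numeric index such as "1",
// or "" for the default sample type) through SampleIndexByName and
// sums that value column across all samples.
func totalFor(p *profile.Profile, name string) (int64, error) {
	idx, err := p.SampleIndexByName(name)
	if err != nil {
		return 0, err
	}
	var total int64
	for _, s := range p.Sample {
		total += s.Value[idx]
	}
	return total, nil
}
```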

315
vendor/github.com/google/pprof/profile/legacy_java_profile.go generated vendored Normal file

@ -0,0 +1,315 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file implements parsers to convert java legacy profiles into
// the profile.proto format.
package profile
import (
"bytes"
"fmt"
"io"
"path/filepath"
"regexp"
"strconv"
"strings"
)
var (
attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
)
// javaCPUProfile returns a new Profile from profilez data.
// b is the profile bytes after the header, period is the profiling
// period, and parse is a function to parse 8-byte chunks from the
// profile in its native endianness.
func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
p := &Profile{
Period: period * 1000,
PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
}
var err error
var locs map[uint64]*Location
if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
return nil, err
}
if err = parseJavaLocations(b, locs, p); err != nil {
return nil, err
}
// Strip out addresses for better merge.
if err = p.Aggregate(true, true, true, true, false); err != nil {
return nil, err
}
return p, nil
}
// parseJavaProfile returns a new profile from heapz or contentionz
// data. b is the profile bytes after the header.
func parseJavaProfile(b []byte) (*Profile, error) {
h := bytes.SplitAfterN(b, []byte("\n"), 2)
if len(h) < 2 {
return nil, errUnrecognized
}
p := &Profile{
PeriodType: &ValueType{},
}
header := string(bytes.TrimSpace(h[0]))
var err error
var pType string
switch header {
case "--- heapz 1 ---":
pType = "heap"
case "--- contentionz 1 ---":
pType = "contention"
default:
return nil, errUnrecognized
}
if b, err = parseJavaHeader(pType, h[1], p); err != nil {
return nil, err
}
var locs map[uint64]*Location
if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
return nil, err
}
if err = parseJavaLocations(b, locs, p); err != nil {
return nil, err
}
// Strip out addresses for better merge.
if err = p.Aggregate(true, true, true, true, false); err != nil {
return nil, err
}
return p, nil
}
// parseJavaHeader parses the attribute section on a java profile and
// populates a profile. Returns the remainder of the buffer after all
// attributes.
func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
nextNewLine := bytes.IndexByte(b, byte('\n'))
for nextNewLine != -1 {
line := string(bytes.TrimSpace(b[0:nextNewLine]))
if line != "" {
h := attributeRx.FindStringSubmatch(line)
if h == nil {
// Not a valid attribute, exit.
return b, nil
}
attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
var err error
switch pType + "/" + attribute {
case "heap/format", "cpu/format", "contention/format":
if value != "java" {
return nil, errUnrecognized
}
case "heap/resolution":
p.SampleType = []*ValueType{
{Type: "inuse_objects", Unit: "count"},
{Type: "inuse_space", Unit: value},
}
case "contention/resolution":
p.SampleType = []*ValueType{
{Type: "contentions", Unit: "count"},
{Type: "delay", Unit: value},
}
case "contention/sampling period":
p.PeriodType = &ValueType{
Type: "contentions", Unit: "count",
}
if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
}
case "contention/ms since reset":
millis, err := strconv.ParseInt(value, 0, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
}
p.DurationNanos = millis * 1000 * 1000
default:
return nil, errUnrecognized
}
}
// Grab next line.
b = b[nextNewLine+1:]
nextNewLine = bytes.IndexByte(b, byte('\n'))
}
return b, nil
}
// parseJavaSamples parses the samples from a java profile and
// populates the Samples in a profile. Returns the remainder of the
// buffer after the samples.
func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
nextNewLine := bytes.IndexByte(b, byte('\n'))
locs := make(map[uint64]*Location)
for nextNewLine != -1 {
line := string(bytes.TrimSpace(b[0:nextNewLine]))
if line != "" {
sample := javaSampleRx.FindStringSubmatch(line)
if sample == nil {
// Not a valid sample, exit.
return b, locs, nil
}
// Java profiles have data/fields inverted compared to other
// profile types.
var err error
value1, value2, value3 := sample[2], sample[1], sample[3]
addrs, err := parseHexAddresses(value3)
if err != nil {
return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
}
var sloc []*Location
for _, addr := range addrs {
loc := locs[addr]
if locs[addr] == nil {
loc = &Location{
Address: addr,
}
p.Location = append(p.Location, loc)
locs[addr] = loc
}
sloc = append(sloc, loc)
}
s := &Sample{
Value: make([]int64, 2),
Location: sloc,
}
if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
}
if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
}
switch pType {
case "heap":
const javaHeapzSamplingRate = 524288 // 512K
if s.Value[0] == 0 {
return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
}
s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
case "contention":
if period := p.Period; period != 0 {
s.Value[0] = s.Value[0] * p.Period
s.Value[1] = s.Value[1] * p.Period
}
}
p.Sample = append(p.Sample, s)
}
// Grab next line.
b = b[nextNewLine+1:]
nextNewLine = bytes.IndexByte(b, byte('\n'))
}
return b, locs, nil
}
// parseJavaLocations parses the location information in a java
// profile and populates the Locations in a profile. It uses the
// location addresses from the profile as the IDs of the locations.
func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
r := bytes.NewBuffer(b)
fns := make(map[string]*Function)
for {
line, err := r.ReadString('\n')
if err != nil {
if err != io.EOF {
return err
}
if line == "" {
break
}
}
if line = strings.TrimSpace(line); line == "" {
continue
}
jloc := javaLocationRx.FindStringSubmatch(line)
if len(jloc) != 3 {
continue
}
addr, err := strconv.ParseUint(jloc[1], 16, 64)
if err != nil {
return fmt.Errorf("parsing sample %s: %v", line, err)
}
loc := locs[addr]
if loc == nil {
// Unused/unseen
continue
}
var lineFunc, lineFile string
var lineNo int64
if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
// Found a line of the form: "function (file:line)"
lineFunc, lineFile = fileLine[1], fileLine[2]
if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
lineNo = n
}
} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
// If there's not a file:line, it's a shared library path.
// The path isn't interesting, so just give the .so.
lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
} else if strings.Contains(jloc[2], "generated stub/JIT") {
lineFunc = "STUB"
} else {
// Treat whole line as the function name. This is used by the
// java agent for internal states such as "GC" or "VM".
lineFunc = jloc[2]
}
fn := fns[lineFunc]
if fn == nil {
fn = &Function{
Name: lineFunc,
SystemName: lineFunc,
Filename: lineFile,
}
fns[lineFunc] = fn
p.Function = append(p.Function, fn)
}
loc.Line = []Line{
{
Function: fn,
Line: lineNo,
},
}
loc.Address = 0
}
p.remapLocationIDs()
p.remapFunctionIDs()
p.remapMappingIDs()
return nil
}
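
For reference, the line shapes parseJavaLocations distinguishes look like the cases below. This standalone sketch copies javaLocationFileLineRx purely for illustration, since the package-private original is not importable:

```go
package main

import (
	"fmt"
	"regexp"
)

// Copied from the vendored legacy java parser for illustration only.
var fileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)

func main() {
	// Example payloads as they appear after javaLocationRx strips the
	// leading hex address from a location line.
	for _, loc := range []string{
		"com.example.Foo.bar (Foo.java:42)", // function (file:line)
		"libjvm.so",                         // bare shared-library path
		"GC",                                // agent-internal state
	} {
		if m := fileLineRx.FindStringSubmatch(loc); m != nil {
			fmt.Printf("func=%q file=%q line=%s\n", m[1], m[2], m[3])
		} else {
			fmt.Printf("no file:line in %q\n", loc)
		}
	}
}
```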

1225
vendor/github.com/google/pprof/profile/legacy_profile.go generated vendored Normal file

File diff suppressed because it is too large

481
vendor/github.com/google/pprof/profile/merge.go generated vendored Normal file

@ -0,0 +1,481 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profile
import (
"fmt"
"sort"
"strconv"
"strings"
)
// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
func (p *Profile) Compact() *Profile {
p, _ = Merge([]*Profile{p})
return p
}
// Merge merges all the profiles in srcs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative with the caveat of the first profile having some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
if len(srcs) == 0 {
return nil, fmt.Errorf("no profiles to merge")
}
p, err := combineHeaders(srcs)
if err != nil {
return nil, err
}
pm := &profileMerger{
p: p,
samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
locations: make(map[locationKey]*Location, len(srcs[0].Location)),
functions: make(map[functionKey]*Function, len(srcs[0].Function)),
mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
}
for _, src := range srcs {
// Clear the profile-specific hash tables
pm.locationsByID = make(map[uint64]*Location, len(src.Location))
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
// The Mapping list has the property that the first mapping
// represents the main binary. Take the first Mapping we see,
// otherwise the operations below will add mappings in an
// arbitrary order.
pm.mapMapping(src.Mapping[0])
}
for _, s := range src.Sample {
if !isZeroSample(s) {
pm.mapSample(s)
}
}
}
for _, s := range p.Sample {
if isZeroSample(s) {
// If there are any zero samples, re-merge the profile to GC
// them.
return Merge([]*Profile{p})
}
}
return p, nil
}
// Normalize normalizes the source profile by multiplying each value in profile by the
// ratio of the sum of the base profile's values of that sample type to the sum of the
// source profile's value of that sample type.
func (p *Profile) Normalize(pb *Profile) error {
if err := p.compatible(pb); err != nil {
return err
}
baseVals := make([]int64, len(p.SampleType))
for _, s := range pb.Sample {
for i, v := range s.Value {
baseVals[i] += v
}
}
srcVals := make([]int64, len(p.SampleType))
for _, s := range p.Sample {
for i, v := range s.Value {
srcVals[i] += v
}
}
normScale := make([]float64, len(baseVals))
for i := range baseVals {
if srcVals[i] == 0 {
normScale[i] = 0.0
} else {
normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
}
}
p.ScaleN(normScale)
return nil
}
func isZeroSample(s *Sample) bool {
for _, v := range s.Value {
if v != 0 {
return false
}
}
return true
}
type profileMerger struct {
p *Profile
// Memoization tables within a profile.
locationsByID map[uint64]*Location
functionsByID map[uint64]*Function
mappingsByID map[uint64]mapInfo
// Memoization tables for profile entities.
samples map[sampleKey]*Sample
locations map[locationKey]*Location
functions map[functionKey]*Function
mappings map[mappingKey]*Mapping
}
type mapInfo struct {
m *Mapping
offset int64
}
func (pm *profileMerger) mapSample(src *Sample) *Sample {
s := &Sample{
Location: make([]*Location, len(src.Location)),
Value: make([]int64, len(src.Value)),
Label: make(map[string][]string, len(src.Label)),
NumLabel: make(map[string][]int64, len(src.NumLabel)),
NumUnit: make(map[string][]string, len(src.NumLabel)),
}
for i, l := range src.Location {
s.Location[i] = pm.mapLocation(l)
}
for k, v := range src.Label {
vv := make([]string, len(v))
copy(vv, v)
s.Label[k] = vv
}
for k, v := range src.NumLabel {
u := src.NumUnit[k]
vv := make([]int64, len(v))
uu := make([]string, len(u))
copy(vv, v)
copy(uu, u)
s.NumLabel[k] = vv
s.NumUnit[k] = uu
}
// Check memoization table. Must be done on the remapped location to
// account for the remapped mapping. Add current values to the
// existing sample.
k := s.key()
if ss, ok := pm.samples[k]; ok {
for i, v := range src.Value {
ss.Value[i] += v
}
return ss
}
copy(s.Value, src.Value)
pm.samples[k] = s
pm.p.Sample = append(pm.p.Sample, s)
return s
}
// key generates sampleKey to be used as a key for maps.
func (sample *Sample) key() sampleKey {
ids := make([]string, len(sample.Location))
for i, l := range sample.Location {
ids[i] = strconv.FormatUint(l.ID, 16)
}
labels := make([]string, 0, len(sample.Label))
for k, v := range sample.Label {
labels = append(labels, fmt.Sprintf("%q%q", k, v))
}
sort.Strings(labels)
numlabels := make([]string, 0, len(sample.NumLabel))
for k, v := range sample.NumLabel {
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
}
sort.Strings(numlabels)
return sampleKey{
strings.Join(ids, "|"),
strings.Join(labels, ""),
strings.Join(numlabels, ""),
}
}
type sampleKey struct {
locations string
labels string
numlabels string
}
func (pm *profileMerger) mapLocation(src *Location) *Location {
if src == nil {
return nil
}
if l, ok := pm.locationsByID[src.ID]; ok {
return l
}
mi := pm.mapMapping(src.Mapping)
l := &Location{
ID: uint64(len(pm.p.Location) + 1),
Mapping: mi.m,
Address: uint64(int64(src.Address) + mi.offset),
Line: make([]Line, len(src.Line)),
IsFolded: src.IsFolded,
}
for i, ln := range src.Line {
l.Line[i] = pm.mapLine(ln)
}
// Check memoization table. Must be done on the remapped location to
// account for the remapped mapping ID.
k := l.key()
if ll, ok := pm.locations[k]; ok {
pm.locationsByID[src.ID] = ll
return ll
}
pm.locationsByID[src.ID] = l
pm.locations[k] = l
pm.p.Location = append(pm.p.Location, l)
return l
}
// key generates locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
key := locationKey{
addr: l.Address,
isFolded: l.IsFolded,
}
if l.Mapping != nil {
// Normalizes address to handle address space randomization.
key.addr -= l.Mapping.Start
key.mappingID = l.Mapping.ID
}
lines := make([]string, len(l.Line)*2)
for i, line := range l.Line {
if line.Function != nil {
lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
}
lines[i*2+1] = strconv.FormatInt(line.Line, 16)
}
key.lines = strings.Join(lines, "|")
return key
}
type locationKey struct {
addr, mappingID uint64
lines string
isFolded bool
}
func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
if src == nil {
return mapInfo{}
}
if mi, ok := pm.mappingsByID[src.ID]; ok {
return mi
}
// Check memoization tables.
mk := src.key()
if m, ok := pm.mappings[mk]; ok {
mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
pm.mappingsByID[src.ID] = mi
return mi
}
m := &Mapping{
ID: uint64(len(pm.p.Mapping) + 1),
Start: src.Start,
Limit: src.Limit,
Offset: src.Offset,
File: src.File,
BuildID: src.BuildID,
HasFunctions: src.HasFunctions,
HasFilenames: src.HasFilenames,
HasLineNumbers: src.HasLineNumbers,
HasInlineFrames: src.HasInlineFrames,
}
pm.p.Mapping = append(pm.p.Mapping, m)
// Update memoization tables.
pm.mappings[mk] = m
mi := mapInfo{m, 0}
pm.mappingsByID[src.ID] = mi
return mi
}
// key generates encoded strings of Mapping to be used as a key for
// maps.
func (m *Mapping) key() mappingKey {
// Normalize addresses to handle address space randomization.
// Round up to next 4K boundary to avoid minor discrepancies.
const mapsizeRounding = 0x1000
size := m.Limit - m.Start
size = size + mapsizeRounding - 1
size = size - (size % mapsizeRounding)
key := mappingKey{
size: size,
offset: m.Offset,
}
switch {
case m.BuildID != "":
key.buildIDOrFile = m.BuildID
case m.File != "":
key.buildIDOrFile = m.File
default:
// A mapping containing neither build ID nor file name is a fake mapping. A
// key with empty buildIDOrFile is used for fake mappings so that they are
// treated as the same mapping during merging.
}
return key
}
type mappingKey struct {
size, offset uint64
buildIDOrFile string
}
func (pm *profileMerger) mapLine(src Line) Line {
ln := Line{
Function: pm.mapFunction(src.Function),
Line: src.Line,
}
return ln
}
func (pm *profileMerger) mapFunction(src *Function) *Function {
if src == nil {
return nil
}
if f, ok := pm.functionsByID[src.ID]; ok {
return f
}
k := src.key()
if f, ok := pm.functions[k]; ok {
pm.functionsByID[src.ID] = f
return f
}
f := &Function{
ID: uint64(len(pm.p.Function) + 1),
Name: src.Name,
SystemName: src.SystemName,
Filename: src.Filename,
StartLine: src.StartLine,
}
pm.functions[k] = f
pm.functionsByID[src.ID] = f
pm.p.Function = append(pm.p.Function, f)
return f
}
// key generates a struct to be used as a key for maps.
func (f *Function) key() functionKey {
return functionKey{
f.StartLine,
f.Name,
f.SystemName,
f.Filename,
}
}
type functionKey struct {
startLine int64
name, systemName, fileName string
}
// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
for _, s := range srcs[1:] {
if err := srcs[0].compatible(s); err != nil {
return nil, err
}
}
var timeNanos, durationNanos, period int64
var comments []string
seenComments := map[string]bool{}
var defaultSampleType string
for _, s := range srcs {
if timeNanos == 0 || s.TimeNanos < timeNanos {
timeNanos = s.TimeNanos
}
durationNanos += s.DurationNanos
if period == 0 || period < s.Period {
period = s.Period
}
for _, c := range s.Comments {
if seen := seenComments[c]; !seen {
comments = append(comments, c)
seenComments[c] = true
}
}
if defaultSampleType == "" {
defaultSampleType = s.DefaultSampleType
}
}
p := &Profile{
SampleType: make([]*ValueType, len(srcs[0].SampleType)),
DropFrames: srcs[0].DropFrames,
KeepFrames: srcs[0].KeepFrames,
TimeNanos: timeNanos,
DurationNanos: durationNanos,
PeriodType: srcs[0].PeriodType,
Period: period,
Comments: comments,
DefaultSampleType: defaultSampleType,
}
copy(p.SampleType, srcs[0].SampleType)
return p, nil
}
// compatible determines if two profiles can be compared/merged.
// Returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
if !equalValueType(p.PeriodType, pb.PeriodType) {
return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
}
if len(p.SampleType) != len(pb.SampleType) {
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
}
for i := range p.SampleType {
if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
}
}
return nil
}
// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
return st1.Type == st2.Type && st1.Unit == st2.Unit
}
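
A small sketch of the typical Merge workflow, assuming the inputs are profiles of identical sample and period types; `mergeAll` is an illustrative helper:

```go
package pprofutil

import (
	"io"

	"github.com/google/pprof/profile"
)

// mergeAll parses each reader and merges the results into one
// compacted profile via Merge. All inputs must share sample and
// period types or Merge returns an error.
func mergeAll(srcs []io.Reader) (*profile.Profile, error) {
	profs := make([]*profile.Profile, 0, len(srcs))
	for _, r := range srcs {
		p, err := profile.Parse(r)
		if err != nil {
			return nil, err
		}
		profs = append(profs, p)
	}
	return profile.Merge(profs)
}
```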

805
vendor/github.com/google/pprof/profile/profile.go generated vendored Normal file

@ -0,0 +1,805 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package profile provides a representation of profile.proto and
// methods to encode/decode profiles in this format.
package profile
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"math"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"time"
)
// Profile is an in-memory representation of profile.proto.
type Profile struct {
SampleType []*ValueType
DefaultSampleType string
Sample []*Sample
Mapping []*Mapping
Location []*Location
Function []*Function
Comments []string
DropFrames string
KeepFrames string
TimeNanos int64
DurationNanos int64
PeriodType *ValueType
Period int64
// The following fields are modified during encoding and copying,
// so are protected by a Mutex.
encodeMu sync.Mutex
commentX []int64
dropFramesX int64
keepFramesX int64
stringTable []string
defaultSampleTypeX int64
}
// ValueType corresponds to Profile.ValueType
type ValueType struct {
Type string // cpu, wall, inuse_space, etc
Unit string // seconds, nanoseconds, bytes, etc
typeX int64
unitX int64
}
// Sample corresponds to Profile.Sample
type Sample struct {
Location []*Location
Value []int64
Label map[string][]string
NumLabel map[string][]int64
NumUnit map[string][]string
locationIDX []uint64
labelX []label
}
// label corresponds to Profile.Label
type label struct {
keyX int64
// Exactly one of the two following values must be set
strX int64
numX int64 // Integer value for this label
// can be set if numX has value
unitX int64
}
// Mapping corresponds to Profile.Mapping
type Mapping struct {
ID uint64
Start uint64
Limit uint64
Offset uint64
File string
BuildID string
HasFunctions bool
HasFilenames bool
HasLineNumbers bool
HasInlineFrames bool
fileX int64
buildIDX int64
}
// Location corresponds to Profile.Location
type Location struct {
ID uint64
Mapping *Mapping
Address uint64
Line []Line
IsFolded bool
mappingIDX uint64
}
// Line corresponds to Profile.Line
type Line struct {
Function *Function
Line int64
functionIDX uint64
}
// Function corresponds to Profile.Function
type Function struct {
ID uint64
Name string
SystemName string
Filename string
StartLine int64
nameX int64
systemNameX int64
filenameX int64
}
// Parse parses a profile and checks for its validity. The input
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
data, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
return ParseData(data)
}
// ParseData parses a profile from a buffer and checks for its
// validity.
func ParseData(data []byte) (*Profile, error) {
var p *Profile
var err error
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err == nil {
data, err = ioutil.ReadAll(gz)
}
if err != nil {
return nil, fmt.Errorf("decompressing profile: %v", err)
}
}
if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
p, err = parseLegacy(data)
}
if err != nil {
return nil, fmt.Errorf("parsing profile: %v", err)
}
if err := p.CheckValid(); err != nil {
return nil, fmt.Errorf("malformed profile: %v", err)
}
return p, nil
}
var errUnrecognized = fmt.Errorf("unrecognized profile format")
var errMalformed = fmt.Errorf("malformed profile format")
var errNoData = fmt.Errorf("empty input file")
var errConcatProfile = fmt.Errorf("concatenated profiles detected")
func parseLegacy(data []byte) (*Profile, error) {
parsers := []func([]byte) (*Profile, error){
parseCPU,
parseHeap,
parseGoCount, // goroutine, threadcreate
parseThread,
parseContention,
parseJavaProfile,
}
for _, parser := range parsers {
p, err := parser(data)
if err == nil {
p.addLegacyFrameInfo()
return p, nil
}
if err != errUnrecognized {
return nil, err
}
}
return nil, errUnrecognized
}
// ParseUncompressed parses an uncompressed protobuf into a profile.
func ParseUncompressed(data []byte) (*Profile, error) {
if len(data) == 0 {
return nil, errNoData
}
p := &Profile{}
if err := unmarshal(data, p); err != nil {
return nil, err
}
if err := p.postDecode(); err != nil {
return nil, err
}
return p, nil
}
var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
// massageMappings applies heuristic-based changes to the profile
// mappings to account for quirks of some environments.
func (p *Profile) massageMappings() {
// Merge adjacent regions with matching names, checking that the offsets match
if len(p.Mapping) > 1 {
mappings := []*Mapping{p.Mapping[0]}
for _, m := range p.Mapping[1:] {
lm := mappings[len(mappings)-1]
if adjacent(lm, m) {
lm.Limit = m.Limit
if m.File != "" {
lm.File = m.File
}
if m.BuildID != "" {
lm.BuildID = m.BuildID
}
p.updateLocationMapping(m, lm)
continue
}
mappings = append(mappings, m)
}
p.Mapping = mappings
}
// Use heuristics to identify main binary and move it to the top of the list of mappings
for i, m := range p.Mapping {
file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
if len(file) == 0 {
continue
}
if len(libRx.FindStringSubmatch(file)) > 0 {
continue
}
if file[0] == '[' {
continue
}
// Swap what we guess is main to position 0.
p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
break
}
// Keep the mapping IDs neatly sorted
for i, m := range p.Mapping {
m.ID = uint64(i + 1)
}
}
// adjacent returns whether two mapping entries represent the same
// mapping that has been split into two. It checks that their addresses
// are adjacent and that their offsets match, when available.
func adjacent(m1, m2 *Mapping) bool {
if m1.File != "" && m2.File != "" {
if m1.File != m2.File {
return false
}
}
if m1.BuildID != "" && m2.BuildID != "" {
if m1.BuildID != m2.BuildID {
return false
}
}
if m1.Limit != m2.Start {
return false
}
if m1.Offset != 0 && m2.Offset != 0 {
offset := m1.Offset + (m1.Limit - m1.Start)
if offset != m2.Offset {
return false
}
}
return true
}
func (p *Profile) updateLocationMapping(from, to *Mapping) {
for _, l := range p.Location {
if l.Mapping == from {
l.Mapping = to
}
}
}
func serialize(p *Profile) []byte {
p.encodeMu.Lock()
p.preEncode()
b := marshal(p)
p.encodeMu.Unlock()
return b
}
// Write writes the profile as a gzip-compressed marshaled protobuf.
func (p *Profile) Write(w io.Writer) error {
zw := gzip.NewWriter(w)
defer zw.Close()
_, err := zw.Write(serialize(p))
return err
}
// WriteUncompressed writes the profile as a marshaled protobuf.
func (p *Profile) WriteUncompressed(w io.Writer) error {
_, err := w.Write(serialize(p))
return err
}
// CheckValid tests whether the profile is valid. Checks include, but are
// not limited to:
// - len(Profile.Sample[n].value) == len(Profile.value_unit)
// - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
// Check that sample values are consistent
sampleLen := len(p.SampleType)
if sampleLen == 0 && len(p.Sample) != 0 {
return fmt.Errorf("missing sample type information")
}
for _, s := range p.Sample {
if s == nil {
return fmt.Errorf("profile has nil sample")
}
if len(s.Value) != sampleLen {
return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
}
for _, l := range s.Location {
if l == nil {
return fmt.Errorf("sample has nil location")
}
}
}
// Check that all mappings/locations/functions are in the tables
// Check that there are no duplicate ids
mappings := make(map[uint64]*Mapping, len(p.Mapping))
for _, m := range p.Mapping {
if m == nil {
return fmt.Errorf("profile has nil mapping")
}
if m.ID == 0 {
return fmt.Errorf("found mapping with reserved ID=0")
}
if mappings[m.ID] != nil {
return fmt.Errorf("multiple mappings with same id: %d", m.ID)
}
mappings[m.ID] = m
}
functions := make(map[uint64]*Function, len(p.Function))
for _, f := range p.Function {
if f == nil {
return fmt.Errorf("profile has nil function")
}
if f.ID == 0 {
return fmt.Errorf("found function with reserved ID=0")
}
if functions[f.ID] != nil {
return fmt.Errorf("multiple functions with same id: %d", f.ID)
}
functions[f.ID] = f
}
locations := make(map[uint64]*Location, len(p.Location))
for _, l := range p.Location {
if l == nil {
return fmt.Errorf("profile has nil location")
}
if l.ID == 0 {
return fmt.Errorf("found location with reserved id=0")
}
if locations[l.ID] != nil {
return fmt.Errorf("multiple locations with same id: %d", l.ID)
}
locations[l.ID] = l
if m := l.Mapping; m != nil {
if m.ID == 0 || mappings[m.ID] != m {
return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
}
}
for _, ln := range l.Line {
f := ln.Function
if f == nil {
return fmt.Errorf("location id: %d has a line with nil function", l.ID)
}
if f.ID == 0 || functions[f.ID] != f {
return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
}
}
}
return nil
}
// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
for _, m := range p.Mapping {
m.HasInlineFrames = m.HasInlineFrames && inlineFrame
m.HasFunctions = m.HasFunctions && function
m.HasFilenames = m.HasFilenames && filename
m.HasLineNumbers = m.HasLineNumbers && linenumber
}
// Aggregate functions
if !function || !filename {
for _, f := range p.Function {
if !function {
f.Name = ""
f.SystemName = ""
}
if !filename {
f.Filename = ""
}
}
}
// Aggregate locations
if !inlineFrame || !address || !linenumber {
for _, l := range p.Location {
if !inlineFrame && len(l.Line) > 1 {
l.Line = l.Line[len(l.Line)-1:]
}
if !linenumber {
for i := range l.Line {
l.Line[i].Line = 0
}
}
if !address {
l.Address = 0
}
}
}
return p.CheckValid()
}
// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// Unit for a given key is the first encountered unit for that key. If multiple
// units are encountered for values paired with a particular key, then the first
// unit encountered is used and all other units are returned, in sorted
// order, in the map of ignored units.
// If no units are encountered for a particular key, the unit is then inferred
// based on the key.
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
numLabelUnits := map[string]string{}
ignoredUnits := map[string]map[string]bool{}
encounteredKeys := map[string]bool{}
// Determine units based on numeric tags for each sample.
for _, s := range p.Sample {
for k := range s.NumLabel {
encounteredKeys[k] = true
for _, unit := range s.NumUnit[k] {
if unit == "" {
continue
}
if wantUnit, ok := numLabelUnits[k]; !ok {
numLabelUnits[k] = unit
} else if wantUnit != unit {
if v, ok := ignoredUnits[k]; ok {
v[unit] = true
} else {
ignoredUnits[k] = map[string]bool{unit: true}
}
}
}
}
}
// Infer units for keys without any units associated with
// numeric tag values.
for key := range encounteredKeys {
unit := numLabelUnits[key]
if unit == "" {
switch key {
case "alignment", "request":
numLabelUnits[key] = "bytes"
default:
numLabelUnits[key] = key
}
}
}
// Copy ignored units into more readable format
unitsIgnored := make(map[string][]string, len(ignoredUnits))
for key, values := range ignoredUnits {
units := make([]string, len(values))
i := 0
for unit := range values {
units[i] = unit
i++
}
sort.Strings(units)
unitsIgnored[key] = units
}
return numLabelUnits, unitsIgnored
}
// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
func (p *Profile) String() string {
ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
for _, c := range p.Comments {
ss = append(ss, "Comment: "+c)
}
if pt := p.PeriodType; pt != nil {
ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
}
ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
if p.TimeNanos != 0 {
ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
}
if p.DurationNanos != 0 {
ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
}
ss = append(ss, "Samples:")
var sh1 string
for _, s := range p.SampleType {
dflt := ""
if s.Type == p.DefaultSampleType {
dflt = "[dflt]"
}
sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
}
ss = append(ss, strings.TrimSpace(sh1))
for _, s := range p.Sample {
ss = append(ss, s.string())
}
ss = append(ss, "Locations")
for _, l := range p.Location {
ss = append(ss, l.string())
}
ss = append(ss, "Mappings")
for _, m := range p.Mapping {
ss = append(ss, m.string())
}
return strings.Join(ss, "\n") + "\n"
}
// string dumps a text representation of a mapping. Intended mainly
// for debugging purposes.
func (m *Mapping) string() string {
bits := ""
if m.HasFunctions {
bits = bits + "[FN]"
}
if m.HasFilenames {
bits = bits + "[FL]"
}
if m.HasLineNumbers {
bits = bits + "[LN]"
}
if m.HasInlineFrames {
bits = bits + "[IN]"
}
return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
m.ID,
m.Start, m.Limit, m.Offset,
m.File,
m.BuildID,
bits)
}
// string dumps a text representation of a location. Intended mainly
// for debugging purposes.
func (l *Location) string() string {
ss := []string{}
locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
if m := l.Mapping; m != nil {
locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
}
if l.IsFolded {
locStr = locStr + "[F] "
}
if len(l.Line) == 0 {
ss = append(ss, locStr)
}
for li := range l.Line {
lnStr := "??"
if fn := l.Line[li].Function; fn != nil {
lnStr = fmt.Sprintf("%s %s:%d s=%d",
fn.Name,
fn.Filename,
l.Line[li].Line,
fn.StartLine)
if fn.Name != fn.SystemName {
lnStr = lnStr + "(" + fn.SystemName + ")"
}
}
ss = append(ss, locStr+lnStr)
// Do not print location details past the first line
locStr = " "
}
return strings.Join(ss, "\n")
}
// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
ss := []string{}
var sv string
for _, v := range s.Value {
sv = fmt.Sprintf("%s %10d", sv, v)
}
sv = sv + ": "
for _, l := range s.Location {
sv = sv + fmt.Sprintf("%d ", l.ID)
}
ss = append(ss, sv)
const labelHeader = " "
if len(s.Label) > 0 {
ss = append(ss, labelHeader+labelsToString(s.Label))
}
if len(s.NumLabel) > 0 {
ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
}
return strings.Join(ss, "\n")
}
// labelsToString returns a string representation of a
// map representing labels.
func labelsToString(labels map[string][]string) string {
ls := []string{}
for k, v := range labels {
ls = append(ls, fmt.Sprintf("%s:%v", k, v))
}
sort.Strings(ls)
return strings.Join(ls, " ")
}
// numLabelsToString returns a string representation of a map
// representing numeric labels.
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
ls := []string{}
for k, v := range numLabels {
units := numUnits[k]
var labelString string
if len(units) == len(v) {
values := make([]string, len(v))
for i, vv := range v {
values[i] = fmt.Sprintf("%d %s", vv, units[i])
}
labelString = fmt.Sprintf("%s:%v", k, values)
} else {
labelString = fmt.Sprintf("%s:%v", k, v)
}
ls = append(ls, labelString)
}
sort.Strings(ls)
return strings.Join(ls, " ")
}
// SetLabel sets the specified key to the specified value for all samples in the
// profile.
func (p *Profile) SetLabel(key string, value []string) {
for _, sample := range p.Sample {
if sample.Label == nil {
sample.Label = map[string][]string{key: value}
} else {
sample.Label[key] = value
}
}
}
// RemoveLabel removes all labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveLabel(key string) {
for _, sample := range p.Sample {
delete(sample.Label, key)
}
}
// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
for _, v := range s.Label[key] {
if v == value {
return true
}
}
return false
}
// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
return s.HasLabel("pprof::base", "true")
}
// Scale multiplies all sample values in a profile by a constant and keeps
// only samples that have at least one non-zero value.
func (p *Profile) Scale(ratio float64) {
if ratio == 1 {
return
}
ratios := make([]float64, len(p.SampleType))
for i := range p.SampleType {
ratios[i] = ratio
}
p.ScaleN(ratios)
}
// ScaleN multiplies each value in each sample by a different ratio
// and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
if len(p.SampleType) != len(ratios) {
return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
}
allOnes := true
for _, r := range ratios {
if r != 1 {
allOnes = false
break
}
}
if allOnes {
return nil
}
fillIdx := 0
for _, s := range p.Sample {
keepSample := false
for i, v := range s.Value {
if ratios[i] != 1 {
val := int64(math.Round(float64(v) * ratios[i]))
s.Value[i] = val
keepSample = keepSample || val != 0
}
}
if keepSample {
p.Sample[fillIdx] = s
fillIdx++
}
}
p.Sample = p.Sample[:fillIdx]
return nil
}
// HasFunctions determines if all locations in this profile have
// symbolized function information.
func (p *Profile) HasFunctions() bool {
for _, l := range p.Location {
if l.Mapping != nil && !l.Mapping.HasFunctions {
return false
}
}
return true
}
// HasFileLines determines if all locations in this profile have
// symbolized file and line number information.
func (p *Profile) HasFileLines() bool {
for _, l := range p.Location {
if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
return false
}
}
return true
}
// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
pp := &Profile{}
if err := unmarshal(serialize(p), pp); err != nil {
panic(err)
}
if err := pp.postDecode(); err != nil {
panic(err)
}
return pp
}
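
End to end, the package reads and writes compressed profiles. A minimal round-trip sketch; the file names are hypothetical and Scale's factor is arbitrary:

```go
package main

import (
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	in, err := os.Open("heap.pb.gz") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// Parse accepts gzip-compressed profile.proto as well as the
	// legacy text formats handled by parseLegacy.
	p, err := profile.Parse(in)
	if err != nil {
		log.Fatal(err)
	}
	p.Scale(2) // arbitrary factor, for demonstration

	out, err := os.Create("heap-scaled.pb.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if err := p.Write(out); err != nil { // gzip-compressed output
		log.Fatal(err)
	}
}
```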

370
vendor/github.com/google/pprof/profile/proto.go generated vendored Normal file

@ -0,0 +1,370 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is a simple protocol buffer encoder and decoder.
// The format is described at
// https://developers.google.com/protocol-buffers/docs/encoding
//
// A protocol message must implement the message interface:
// decoder() []decoder
// encode(*buffer)
//
// The decoder method returns a slice indexed by field number that gives the
// function to decode that field.
// The encode method encodes its receiver into the given buffer.
//
// The two methods are simple enough to be implemented by hand rather than
// by using a protocol compiler.
//
// See profile.go for examples of messages implementing this interface.
//
// There is no support for groups, message sets, or "has" bits.
package profile
import (
"errors"
"fmt"
)
type buffer struct {
field int // field tag
typ int // proto wire type code for field
u64 uint64
data []byte
tmp [16]byte
}
type decoder func(*buffer, message) error
type message interface {
decoder() []decoder
encode(*buffer)
}
func marshal(m message) []byte {
var b buffer
m.encode(&b)
return b.data
}
func encodeVarint(b *buffer, x uint64) {
for x >= 128 {
b.data = append(b.data, byte(x)|0x80)
x >>= 7
}
b.data = append(b.data, byte(x))
}
func encodeLength(b *buffer, tag int, len int) {
encodeVarint(b, uint64(tag)<<3|2)
encodeVarint(b, uint64(len))
}
func encodeUint64(b *buffer, tag int, x uint64) {
// append varint to b.data
encodeVarint(b, uint64(tag)<<3)
encodeVarint(b, x)
}
func encodeUint64s(b *buffer, tag int, x []uint64) {
if len(x) > 2 {
// Use packed encoding
n1 := len(b.data)
for _, u := range x {
encodeVarint(b, u)
}
n2 := len(b.data)
encodeLength(b, tag, n2-n1)
n3 := len(b.data)
copy(b.tmp[:], b.data[n2:n3])
copy(b.data[n1+(n3-n2):], b.data[n1:n2])
copy(b.data[n1:], b.tmp[:n3-n2])
return
}
for _, u := range x {
encodeUint64(b, tag, u)
}
}
func encodeUint64Opt(b *buffer, tag int, x uint64) {
if x == 0 {
return
}
encodeUint64(b, tag, x)
}
func encodeInt64(b *buffer, tag int, x int64) {
u := uint64(x)
encodeUint64(b, tag, u)
}
func encodeInt64s(b *buffer, tag int, x []int64) {
if len(x) > 2 {
// Use packed encoding
n1 := len(b.data)
for _, u := range x {
encodeVarint(b, uint64(u))
}
n2 := len(b.data)
encodeLength(b, tag, n2-n1)
n3 := len(b.data)
copy(b.tmp[:], b.data[n2:n3])
copy(b.data[n1+(n3-n2):], b.data[n1:n2])
copy(b.data[n1:], b.tmp[:n3-n2])
return
}
for _, u := range x {
encodeInt64(b, tag, u)
}
}
func encodeInt64Opt(b *buffer, tag int, x int64) {
if x == 0 {
return
}
encodeInt64(b, tag, x)
}
func encodeString(b *buffer, tag int, x string) {
encodeLength(b, tag, len(x))
b.data = append(b.data, x...)
}
func encodeStrings(b *buffer, tag int, x []string) {
for _, s := range x {
encodeString(b, tag, s)
}
}
func encodeBool(b *buffer, tag int, x bool) {
if x {
encodeUint64(b, tag, 1)
} else {
encodeUint64(b, tag, 0)
}
}
func encodeBoolOpt(b *buffer, tag int, x bool) {
if x {
encodeBool(b, tag, x)
}
}
func encodeMessage(b *buffer, tag int, m message) {
n1 := len(b.data)
m.encode(b)
n2 := len(b.data)
encodeLength(b, tag, n2-n1)
n3 := len(b.data)
copy(b.tmp[:], b.data[n2:n3])
copy(b.data[n1+(n3-n2):], b.data[n1:n2])
copy(b.data[n1:], b.tmp[:n3-n2])
}
func unmarshal(data []byte, m message) (err error) {
b := buffer{data: data, typ: 2}
return decodeMessage(&b, m)
}
func le64(p []byte) uint64 {
return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
}
func le32(p []byte) uint32 {
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}
func decodeVarint(data []byte) (uint64, []byte, error) {
var u uint64
for i := 0; ; i++ {
if i >= 10 || i >= len(data) {
return 0, nil, errors.New("bad varint")
}
u |= uint64(data[i]&0x7F) << uint(7*i)
if data[i]&0x80 == 0 {
return u, data[i+1:], nil
}
}
}
func decodeField(b *buffer, data []byte) ([]byte, error) {
x, data, err := decodeVarint(data)
if err != nil {
return nil, err
}
b.field = int(x >> 3)
b.typ = int(x & 7)
b.data = nil
b.u64 = 0
switch b.typ {
case 0:
b.u64, data, err = decodeVarint(data)
if err != nil {
return nil, err
}
case 1:
if len(data) < 8 {
return nil, errors.New("not enough data")
}
b.u64 = le64(data[:8])
data = data[8:]
case 2:
var n uint64
n, data, err = decodeVarint(data)
if err != nil {
return nil, err
}
if n > uint64(len(data)) {
return nil, errors.New("too much data")
}
b.data = data[:n]
data = data[n:]
case 5:
if len(data) < 4 {
return nil, errors.New("not enough data")
}
b.u64 = uint64(le32(data[:4]))
data = data[4:]
default:
return nil, fmt.Errorf("unknown wire type: %d", b.typ)
}
return data, nil
}
func checkType(b *buffer, typ int) error {
if b.typ != typ {
return errors.New("type mismatch")
}
return nil
}
func decodeMessage(b *buffer, m message) error {
if err := checkType(b, 2); err != nil {
return err
}
dec := m.decoder()
data := b.data
for len(data) > 0 {
// pull varint field# + type
var err error
data, err = decodeField(b, data)
if err != nil {
return err
}
if b.field >= len(dec) || dec[b.field] == nil {
continue
}
if err := dec[b.field](b, m); err != nil {
return err
}
}
return nil
}
func decodeInt64(b *buffer, x *int64) error {
if err := checkType(b, 0); err != nil {
return err
}
*x = int64(b.u64)
return nil
}
func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 {
// Packed encoding
data := b.data
tmp := make([]int64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
if u, data, err = decodeVarint(data); err != nil {
return err
}
tmp = append(tmp, int64(u))
}
*x = append(*x, tmp...)
return nil
}
var i int64
if err := decodeInt64(b, &i); err != nil {
return err
}
*x = append(*x, i)
return nil
}
func decodeUint64(b *buffer, x *uint64) error {
if err := checkType(b, 0); err != nil {
return err
}
*x = b.u64
return nil
}
func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 {
data := b.data
// Packed encoding
tmp := make([]uint64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
if u, data, err = decodeVarint(data); err != nil {
return err
}
tmp = append(tmp, u)
}
*x = append(*x, tmp...)
return nil
}
var u uint64
if err := decodeUint64(b, &u); err != nil {
return err
}
*x = append(*x, u)
return nil
}
func decodeString(b *buffer, x *string) error {
if err := checkType(b, 2); err != nil {
return err
}
*x = string(b.data)
return nil
}
func decodeStrings(b *buffer, x *[]string) error {
var s string
if err := decodeString(b, &s); err != nil {
return err
}
*x = append(*x, s)
return nil
}
func decodeBool(b *buffer, x *bool) error {
if err := checkType(b, 0); err != nil {
return err
}
if int64(b.u64) == 0 {
*x = false
} else {
*x = true
}
return nil
}
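
The varint helpers above implement standard protobuf base-128 encoding. A standalone restatement, mirroring the unexported encodeVarint, makes the byte layout concrete:

```go
package main

import "fmt"

// putVarint mirrors encodeVarint: 7 bits per byte, least-significant
// group first, with the high bit set on every byte except the last.
func putVarint(dst []byte, x uint64) []byte {
	for x >= 128 {
		dst = append(dst, byte(x)|0x80)
		x >>= 7
	}
	return append(dst, byte(x))
}

func main() {
	// 300 = 0b1_0010_1100 encodes as 0xac 0x02.
	fmt.Printf("% x\n", putVarint(nil, 300))
}
```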

178
vendor/github.com/google/pprof/profile/prune.go generated vendored Normal file

@ -0,0 +1,178 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Implements methods to remove frames from profiles.
package profile
import (
"fmt"
"regexp"
"strings"
)
var (
reservedNames = []string{"(anonymous namespace)", "operator()"}
bracketRx = func() *regexp.Regexp {
var quotedNames []string
for _, name := range append(reservedNames, "(") {
quotedNames = append(quotedNames, regexp.QuoteMeta(name))
}
return regexp.MustCompile(strings.Join(quotedNames, "|"))
}()
)
// simplifyFunc does some primitive simplification of function names.
func simplifyFunc(f string) string {
// Account for leading '.' on the PPC ELF v1 ABI.
funcName := strings.TrimPrefix(f, ".")
// Account for unsimplified names -- try to remove the argument list by trimming
// starting from the first '(', but skipping reserved names that have '('.
for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
foundReserved := false
for _, res := range reservedNames {
if funcName[ind[0]:ind[1]] == res {
foundReserved = true
break
}
}
if !foundReserved {
funcName = funcName[:ind[0]]
break
}
}
return funcName
}
// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
prune := make(map[uint64]bool)
pruneBeneath := make(map[uint64]bool)
for _, loc := range p.Location {
var i int
for i = len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
funcName := simplifyFunc(fn.Name)
if dropRx.MatchString(funcName) {
if keepRx == nil || !keepRx.MatchString(funcName) {
break
}
}
}
}
if i >= 0 {
// Found matching entry to prune.
pruneBeneath[loc.ID] = true
// Remove the matching location.
if i == len(loc.Line)-1 {
// Matched the top entry: prune the whole location.
prune[loc.ID] = true
} else {
loc.Line = loc.Line[i+1:]
}
}
}
// Prune locs from each Sample
for _, sample := range p.Sample {
// Scan from the root to the leaves to find the prune location.
// Do not prune frames before the first user frame, to avoid
// pruning everything.
foundUser := false
for i := len(sample.Location) - 1; i >= 0; i-- {
id := sample.Location[i].ID
if !prune[id] && !pruneBeneath[id] {
foundUser = true
continue
}
if !foundUser {
continue
}
if prune[id] {
sample.Location = sample.Location[i+1:]
break
}
if pruneBeneath[id] {
sample.Location = sample.Location[i:]
break
}
}
}
}
// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
func (p *Profile) RemoveUninteresting() error {
var keep, drop *regexp.Regexp
var err error
if p.DropFrames != "" {
if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
}
if p.KeepFrames != "" {
if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
}
}
p.Prune(drop, keep)
}
return nil
}
// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
//
// Please see the example below to understand this method as well as
// the difference from Prune method.
//
// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
//
// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
//
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
pruneBeneath := make(map[uint64]bool)
for _, loc := range p.Location {
for i := 0; i < len(loc.Line); i++ {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
funcName := simplifyFunc(fn.Name)
if dropRx.MatchString(funcName) {
// Found matching entry to prune.
pruneBeneath[loc.ID] = true
loc.Line = loc.Line[i:]
break
}
}
}
}
// Prune locs from each Sample
for _, sample := range p.Sample {
// Scan from the bottom leaf to the root to find the prune location.
for i, loc := range sample.Location {
if pruneBeneath[loc.ID] {
sample.Location = sample.Location[i:]
break
}
}
}
}
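
A short sketch of Prune in use; the drop/keep expressions are a hypothetical policy, not anything the package prescribes:

```go
package pprofutil

import (
	"regexp"

	"github.com/google/pprof/profile"
)

// dropRuntime trims Go runtime internals from every stack while
// keeping runtime.main, which matches both expressions and survives.
func dropRuntime(p *profile.Profile) {
	drop := regexp.MustCompile(`^runtime\..*`)
	keep := regexp.MustCompile(`^runtime\.main$`)
	p.Prune(drop, keep)
}
```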

2
vendor/modules.txt vendored

@ -384,6 +384,8 @@ github.com/google/go-querystring/query
# github.com/google/gofuzz v1.2.0
github.com/google/gofuzz
github.com/google/gofuzz/bytesource
# github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22
github.com/google/pprof/profile
# github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2
github.com/google/tcpproxy
# github.com/googleapis/gax-go/v2 v2.0.5


@ -54,7 +54,7 @@ all targets for 2 minutes.
- `-duration` - Optional, the total time to capture data from the target agent. Must
be greater than the interval and longer than 10 seconds. Defaults to 2 minutes.
- `-interval` - Optional, the interval at which to capture dynamic data, such as logs
- `-interval` - Optional, the interval at which to capture dynamic data, such as heap
and metrics. Must be longer than 5 seconds. Defaults to 30 seconds.
- `-capture` - Optional, can be specified multiple times for each [capture target](#capture-targets)
@ -71,14 +71,14 @@ all targets for 2 minutes.
The `-capture` flag can be specified multiple times to capture specific
information when `debug` is running. By default, it captures all information.
| Target | Description |
| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `agent` | Version and configuration information about the agent. |
| `host` | Information about resources on the host running the target agent such as CPU, memory, and disk. |
| `cluster` | A list of all the WAN and LAN members in the cluster. |
| `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. |
| `logs` | `DEBUG` level logs for the target agent, captured for the interval. |
| `pprof` | Golang heap, CPU, goroutine, and trace profiling. This information is not retrieved unless [`enable_debug`](/docs/agent/options#enable_debug) is set to `true` on the target agent. |
| Target | Description |
| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `agent` | Version and configuration information about the agent. |
| `host` | Information about resources on the host running the target agent such as CPU, memory, and disk. |
| `cluster` | A list of all the WAN and LAN members in the cluster. |
| `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. |
| `logs` | `DEBUG` level logs for the target agent, captured for the duration. |
| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU profiles and traces are captured for the full `duration` in a single file, while heap and goroutine profiles are captured as separate snapshots at each `interval`. This information is not retrieved unless [`enable_debug`](/docs/agent/options#enable_debug) is set to `true` on the target agent, or ACLs are enabled and an ACL token with `operator:read` is provided. |
## Examples