2020-06-25 16:51:23 +00:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
|
|
|
"archive/tar"
|
|
|
|
"compress/gzip"
|
|
|
|
"context"
|
2020-08-11 17:14:28 +00:00
|
|
|
"crypto/tls"
|
2020-06-25 16:51:23 +00:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"html/template"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"os/signal"
|
|
|
|
"path/filepath"
|
2020-08-11 17:14:28 +00:00
|
|
|
"strconv"
|
2020-06-25 16:51:23 +00:00
|
|
|
"strings"
|
|
|
|
"syscall"
|
|
|
|
"time"
|
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
"github.com/hashicorp/go-cleanhttp"
|
2020-06-25 16:51:23 +00:00
|
|
|
"github.com/hashicorp/nomad/api"
|
|
|
|
"github.com/posener/complete"
|
|
|
|
)
|
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
// OperatorDebugCommand implements "nomad operator debug": it builds an
// archive of Nomad cluster configuration and state, plus Consul and Vault
// status, logs, and pprof profiles from selected servers and client nodes.
type OperatorDebugCommand struct {
	Meta

	// timestamp is the UTC capture time, used to name the output directory.
	timestamp string
	// collectDir is the temporary root directory all files are written under.
	collectDir string
	// duration is how long the log monitors and periodic capture loop run.
	duration time.Duration
	// interval is the pause between periodic cluster-state snapshots.
	interval time.Duration
	// logLevel is the level requested from the agent monitor endpoints.
	logLevel string
	// stale, when true, allows reading membership data from a non-leader server.
	stale bool
	// nodeIDs are the fully resolved client node IDs to capture from.
	nodeIDs []string
	// serverIDs are the server names to capture from.
	serverIDs []string
	// consul and vault hold address/TLS/token configuration for querying the
	// external Consul and Vault APIs directly.
	consul *external
	vault  *external
	// manifest accumulates the relative path of every file collected, for the
	// index files written at the end.
	manifest []string
	// ctx/cancel bound the lifetime of the monitor goroutines and the
	// periodic capture loop.
	ctx    context.Context
	cancel context.CancelFunc
}
|
|
|
|
|
|
|
|
const (
	// userAgent identifies this tool in HTTP requests made directly to the
	// Consul and Vault APIs.
	userAgent = "nomad operator debug"
)
|
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
// Help returns the long-form usage text for the operator debug command.
func (c *OperatorDebugCommand) Help() string {
	helpText := `
Usage: nomad operator debug [options]

  Build an archive containing Nomad cluster configuration and state, and Consul and Vault
  status. Include logs and pprof profiles for selected servers and client nodes.

General Options:

  ` + generalOptionsUsage() + `

Debug Options:

  -duration=<duration>
    The duration of the log monitor command. Defaults to 2m.

  -interval=<interval>
    The interval between snapshots of the Nomad state. If unspecified, only one snapshot is
    captured.

  -log-level=<level>
    The log level to monitor. Defaults to DEBUG.

  -node-id=<node>,<node>
    Comma separated list of Nomad client node ids, to monitor for logs and include pprof
    profiles. Accepts id prefixes.

  -server-id=<server>,<server>
    Comma separated list of Nomad server names, "leader", or "all" to monitor for logs and include pprof
    profiles.

  -stale=<true|false>
    If "false", the default, get membership data from the cluster leader. If the cluster is in
    an outage unable to establish leadership, it may be necessary to get the configuration from
    a non-leader server.

  -output=<path>
    Path to the parent directory of the output directory. If not specified, an archive is built
    in the current directory.

  -consul-http-addr=<addr>
    The address and port of the Consul HTTP agent. Overrides the CONSUL_HTTP_ADDR environment variable.

  -consul-token=<token>
    Token used to query Consul. Overrides the CONSUL_HTTP_TOKEN environment
    variable and the Consul token file.

  -consul-token-file=<path>
    Path to the Consul token file. Overrides the CONSUL_HTTP_TOKEN_FILE
    environment variable.

  -consul-client-cert=<path>
    Path to the Consul client cert file. Overrides the CONSUL_CLIENT_CERT
    environment variable.

  -consul-client-key=<path>
    Path to the Consul client key file. Overrides the CONSUL_CLIENT_KEY
    environment variable.

  -consul-ca-cert=<path>
    Path to a CA file to use with Consul. Overrides the CONSUL_CACERT
    environment variable and the Consul CA path.

  -consul-ca-path=<path>
    Path to a directory of PEM encoded CA cert files to verify the Consul
    certificate. Overrides the CONSUL_CAPATH environment variable.

  -vault-address=<addr>
    The address and port of the Vault HTTP agent. Overrides the VAULT_ADDR
    environment variable.

  -vault-token=<token>
    Token used to query Vault. Overrides the VAULT_TOKEN environment
    variable.

  -vault-client-cert=<path>
    Path to the Vault client cert file. Overrides the VAULT_CLIENT_CERT
    environment variable.

  -vault-client-key=<path>
    Path to the Vault client key file. Overrides the VAULT_CLIENT_KEY
    environment variable.

  -vault-ca-cert=<path>
    Path to a CA file to use with Vault. Overrides the VAULT_CACERT
    environment variable and the Vault CA path.

  -vault-ca-path=<path>
    Path to a directory of PEM encoded CA cert files to verify the Vault
    certificate. Overrides the VAULT_CAPATH environment variable.
`

	return strings.TrimSpace(helpText)
}
|
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
// Synopsis returns the one-line description shown in command listings.
func (c *OperatorDebugCommand) Synopsis() string {
	return "Build a debug archive"
}
|
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) AutocompleteFlags() complete.Flags {
|
2020-06-25 16:51:23 +00:00
|
|
|
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
|
|
|
|
complete.Flags{
|
|
|
|
"-duration": complete.PredictAnything,
|
|
|
|
"-interval": complete.PredictAnything,
|
|
|
|
"-log-level": complete.PredictAnything,
|
|
|
|
"-node-id": complete.PredictAnything,
|
|
|
|
"-server-id": complete.PredictAnything,
|
|
|
|
"-output": complete.PredictAnything,
|
|
|
|
"-consul-token": complete.PredictAnything,
|
|
|
|
"-vault-token": complete.PredictAnything,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
// AutocompleteArgs returns the argument predictor; the command accepts no
// positional arguments.
func (c *OperatorDebugCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}
|
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
// Name returns the name of this subcommand.
func (c *OperatorDebugCommand) Name() string { return "debug" }
|
2020-06-25 16:51:23 +00:00
|
|
|
|
2020-08-11 19:39:44 +00:00
|
|
|
// Run parses the debug flags, resolves the target servers and client nodes,
// collects cluster data into a temporary directory, and either leaves the
// directory in place (-output) or bundles it into a .tar.gz archive.
// Returns 0 on success, 1 for usage/lookup errors, 2 for collection errors.
func (c *OperatorDebugCommand) Run(args []string) int {
	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }

	// Raw string flag values; durations are parsed after flag parsing.
	var duration, interval, output string
	var nodeIDs, serverIDs string

	flags.StringVar(&duration, "duration", "2m", "")
	flags.StringVar(&interval, "interval", "2m", "")
	flags.StringVar(&c.logLevel, "log-level", "DEBUG", "")
	flags.StringVar(&nodeIDs, "node-id", "", "")
	flags.StringVar(&serverIDs, "server-id", "", "")
	flags.BoolVar(&c.stale, "stale", false, "")
	flags.StringVar(&output, "output", "", "")

	// Consul flags default to the standard CONSUL_* environment variables so
	// the tool works out of the box against a locally configured Consul.
	c.consul = &external{tls: &api.TLSConfig{}}
	flags.StringVar(&c.consul.addrVal, "consul-http-addr", os.Getenv("CONSUL_HTTP_ADDR"), "")
	ssl := os.Getenv("CONSUL_HTTP_SSL")
	// best-effort parse; an unset/invalid value leaves ssl false
	c.consul.ssl, _ = strconv.ParseBool(ssl)
	flags.StringVar(&c.consul.auth, "consul-auth", os.Getenv("CONSUL_HTTP_AUTH"), "")
	flags.StringVar(&c.consul.tokenVal, "consul-token", os.Getenv("CONSUL_HTTP_TOKEN"), "")
	flags.StringVar(&c.consul.tokenFile, "consul-token-file", os.Getenv("CONSUL_HTTP_TOKEN_FILE"), "")
	flags.StringVar(&c.consul.tls.ClientCert, "consul-client-cert", os.Getenv("CONSUL_CLIENT_CERT"), "")
	flags.StringVar(&c.consul.tls.ClientKey, "consul-client-key", os.Getenv("CONSUL_CLIENT_KEY"), "")
	flags.StringVar(&c.consul.tls.CACert, "consul-ca-cert", os.Getenv("CONSUL_CACERT"), "")
	flags.StringVar(&c.consul.tls.CAPath, "consul-ca-path", os.Getenv("CONSUL_CAPATH"), "")

	// Vault flags likewise default to the standard VAULT_* environment variables.
	c.vault = &external{tls: &api.TLSConfig{}}
	flags.StringVar(&c.vault.addrVal, "vault-address", os.Getenv("VAULT_ADDR"), "")
	flags.StringVar(&c.vault.tokenVal, "vault-token", os.Getenv("VAULT_TOKEN"), "")
	flags.StringVar(&c.vault.tls.CACert, "vault-ca-cert", os.Getenv("VAULT_CACERT"), "")
	flags.StringVar(&c.vault.tls.CAPath, "vault-ca-path", os.Getenv("VAULT_CAPATH"), "")
	flags.StringVar(&c.vault.tls.ClientCert, "vault-client-cert", os.Getenv("VAULT_CLIENT_CERT"), "")
	flags.StringVar(&c.vault.tls.ClientKey, "vault-client-key", os.Getenv("VAULT_CLIENT_KEY"), "")

	if err := flags.Parse(args); err != nil {
		c.Ui.Error(fmt.Sprintf("Error parsing arguments: %q", err))
		return 1
	}

	// Parse the time durations
	d, err := time.ParseDuration(duration)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error parsing duration: %s: %s", duration, err.Error()))
		return 1
	}
	c.duration = d

	i, err := time.ParseDuration(interval)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error parsing interval: %s: %s", interval, err.Error()))
		return 1
	}
	c.interval = i

	args = flags.Args()
	if l := len(args); l != 0 {
		c.Ui.Error("This command takes no arguments")
		c.Ui.Error(commandErrorText(c))
		return 1
	}

	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err.Error()))
		return 1
	}

	// Resolve node prefixes: each -node-id entry may be a UUID prefix, so
	// expand it through the node prefix list API into full node IDs.
	for _, id := range argNodes(nodeIDs) {
		id = sanitizeUUIDPrefix(id)
		nodes, _, err := client.Nodes().PrefixList(id)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
			return 1
		}
		// Return error if no nodes are found
		if len(nodes) == 0 {
			c.Ui.Error(fmt.Sprintf("No node(s) with prefix %q found", id))
			return 1
		}

		for _, n := range nodes {
			c.nodeIDs = append(c.nodeIDs, n.ID)
		}
	}

	// Resolve servers
	members, err := client.Agent().Members()
	c.writeJSON("version", "members.json", members, err)
	// We always write the error to the file, but don't range if no members found
	if serverIDs == "all" && members != nil {
		// Special case to capture from all servers
		for _, member := range members.Members {
			c.serverIDs = append(c.serverIDs, member.Name)
		}
	} else {
		for _, id := range argNodes(serverIDs) {
			c.serverIDs = append(c.serverIDs, id)
		}
	}

	// Return error if servers were specified but not found
	if len(serverIDs) > 0 && len(c.serverIDs) == 0 {
		c.Ui.Error(fmt.Sprintf("Failed to retrieve servers, 0 members found in list: %s", serverIDs))
		return 1
	}

	// Set up the shared cancellation context and install the signal handler
	// before any collection goroutines start.
	c.manifest = make([]string, 0)
	ctx, cancel := context.WithCancel(context.Background())
	c.ctx = ctx
	c.cancel = cancel
	c.trap()

	// Timestamp (UTC) names the bundle so repeated runs don't collide.
	format := "2006-01-02-150405Z"
	c.timestamp = time.Now().UTC().Format(format)
	stamped := "nomad-debug-" + c.timestamp

	c.Ui.Output("Starting debugger and capturing cluster data...")
	c.Ui.Output(fmt.Sprintf("Capturing from servers: %v", c.serverIDs))
	c.Ui.Output(fmt.Sprintf("Capturing from client nodes: %v", c.nodeIDs))

	c.Ui.Output(fmt.Sprintf("    Interval: '%s'", interval))
	c.Ui.Output(fmt.Sprintf("    Duration: '%s'", duration))

	// Create the output path. With -output the directory is kept; otherwise a
	// temp directory is used and removed after the archive is written.
	var tmp string
	if output != "" {
		tmp = filepath.Join(output, stamped)
		_, err := os.Stat(tmp)
		if !os.IsNotExist(err) {
			c.Ui.Error("Output directory already exists")
			return 2
		}
	} else {
		tmp, err = ioutil.TempDir(os.TempDir(), stamped)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error creating tmp directory: %s", err.Error()))
			return 2
		}
		defer os.RemoveAll(tmp)
	}

	c.collectDir = tmp

	err = c.collect(client)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error collecting data: %s", err.Error()))
		return 2
	}

	c.writeManifest()

	if output != "" {
		c.Ui.Output(fmt.Sprintf("Created debug directory: %s", c.collectDir))
		return 0
	}

	archiveFile := stamped + ".tar.gz"
	err = TarCZF(archiveFile, tmp, stamped)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error creating archive: %s", err.Error()))
		return 2
	}

	c.Ui.Output(fmt.Sprintf("Created debug archive: %s", archiveFile))
	return 0
}
|
|
|
|
|
|
|
|
// collect collects data from our endpoints and writes the archive bundle.
// It captures agent metadata, queries Consul and Vault directly using
// addresses discovered from the agent config, gathers host data and pprof
// profiles, starts the log monitors, and then blocks in the periodic
// snapshot loop until the capture duration elapses or the context is canceled.
func (c *OperatorDebugCommand) collect(client *api.Client) error {
	// Version contains cluster meta information
	dir := "version"
	err := c.mkdir(dir)
	if err != nil {
		return err
	}

	// Agent self data; a query error is recorded in the bundle by writeJSON.
	self, err := client.Agent().Self()
	c.writeJSON(dir, "agent-self.json", self, err)

	// Fetch data directly from consul and vault. Ignore errors
	var consul, vault string

	// Discover the Consul and Vault addresses from the agent's own config.
	// The config is an untyped map, so every level is type-asserted with a
	// soft failure: a missing or unexpected value just leaves the address empty.
	if self != nil {
		r, ok := self.Config["Consul"]
		if ok {
			m, ok := r.(map[string]interface{})
			if ok {

				raw := m["Addr"]
				consul, _ = raw.(string)
				raw = m["EnableSSL"]
				ssl, _ := raw.(bool)
				if ssl {
					consul = "https://" + consul
				} else {
					consul = "http://" + consul
				}
			}
		}

		r, ok = self.Config["Vault"]
		if ok {
			m, ok := r.(map[string]interface{})
			if ok {
				raw := m["Addr"]
				vault, _ = raw.(string)
			}
		}
	}

	c.collectConsul(dir, consul)
	c.collectVault(dir, vault)
	c.collectAgentHosts(client)
	c.collectPprofs(client)

	// Monitors run in goroutines; collectPeriodic blocks until done.
	c.startMonitors(client)
	c.collectPeriodic(client)

	return nil
}
|
|
|
|
|
|
|
|
// path returns platform specific paths in the tmp root directory
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) path(paths ...string) string {
|
2020-06-25 16:51:23 +00:00
|
|
|
ps := []string{c.collectDir}
|
|
|
|
ps = append(ps, paths...)
|
|
|
|
return filepath.Join(ps...)
|
|
|
|
}
|
|
|
|
|
|
|
|
// mkdir creates directories in the tmp root directory. Nested paths are
// created as needed; an already-existing directory is not an error.
func (c *OperatorDebugCommand) mkdir(paths ...string) error {
	return os.MkdirAll(c.path(paths...), 0755)
}
|
|
|
|
|
|
|
|
// startMonitors starts go routines for each node and client
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) startMonitors(client *api.Client) {
|
2020-06-25 16:51:23 +00:00
|
|
|
for _, id := range c.nodeIDs {
|
|
|
|
go c.startMonitor("client", "node_id", id, client)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, id := range c.serverIDs {
|
|
|
|
go c.startMonitor("server", "server_id", id, client)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// startMonitor starts one monitor api request, writing to a file. It blocks and should be
// called in a go routine. Errors are ignored, we want to build the archive even if a node
// is unavailable.
//
// path is the bundle subdirectory ("client" or "server"), idKey the query
// parameter name ("node_id" or "server_id"), and nodeID the target agent id.
func (c *OperatorDebugCommand) startMonitor(path, idKey, nodeID string, client *api.Client) {
	c.mkdir(path, nodeID)
	fh, err := os.Create(c.path(path, nodeID, "monitor.log"))
	if err != nil {
		return
	}
	defer fh.Close()

	// Address the monitor request at the one node/server and requested level.
	qo := api.QueryOptions{
		Params: map[string]string{
			idKey:       nodeID,
			"log_level": c.logLevel,
		},
	}

	// Stream frames until the monitor errors or the capture context ends.
	outCh, errCh := client.Agent().Monitor(c.ctx.Done(), &qo)
	for {
		select {
		case out := <-outCh:
			// nil frames arrive when the stream winds down; skip them
			if out == nil {
				continue
			}
			fh.Write(out.Data)
			fh.WriteString("\n")

		case err := <-errCh:
			// record the failure in the log itself, then stop
			fh.WriteString(fmt.Sprintf("monitor: %s\n", err.Error()))
			return

		case <-c.ctx.Done():
			return
		}
	}
}
|
|
|
|
|
2020-07-02 13:51:25 +00:00
|
|
|
// collectAgentHosts calls collectAgentHost for each selected node
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) collectAgentHosts(client *api.Client) {
|
2020-07-02 13:51:25 +00:00
|
|
|
for _, n := range c.nodeIDs {
|
|
|
|
c.collectAgentHost("client", n, client)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, n := range c.serverIDs {
|
|
|
|
c.collectAgentHost("server", n, client)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// collectAgentHost gets the agent host data
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) collectAgentHost(path, id string, client *api.Client) {
|
2020-07-02 13:51:25 +00:00
|
|
|
var host *api.HostDataResponse
|
|
|
|
var err error
|
|
|
|
if path == "server" {
|
|
|
|
host, err = client.Agent().Host(id, "", nil)
|
|
|
|
} else {
|
|
|
|
host, err = client.Agent().Host("", id, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
path = filepath.Join(path, id)
|
2020-08-28 15:58:06 +00:00
|
|
|
c.mkdir(path)
|
2020-07-02 13:51:25 +00:00
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
c.writeJSON(path, "agent-host.json", host, err)
|
2020-07-02 13:51:25 +00:00
|
|
|
}
|
|
|
|
|
2020-06-25 16:51:23 +00:00
|
|
|
// collectPprofs captures the /agent/pprof for each listed node
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) collectPprofs(client *api.Client) {
|
2020-06-25 16:51:23 +00:00
|
|
|
for _, n := range c.nodeIDs {
|
|
|
|
c.collectPprof("client", n, client)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, n := range c.serverIDs {
|
|
|
|
c.collectPprof("server", n, client)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// collectPprof captures pprof data for the node: CPU profile, execution
// trace, and goroutine dumps (binary plus two text debug formats). Individual
// endpoint failures are silently skipped so the rest of the bundle is still built.
func (c *OperatorDebugCommand) collectPprof(path, id string, client *api.Client) {
	opts := api.PprofOptions{Seconds: 1}
	// The pprof API addresses servers by server ID and clients by node ID.
	if path == "server" {
		opts.ServerID = id
	} else {
		opts.NodeID = id
	}

	path = filepath.Join(path, id)
	err := c.mkdir(path)
	if err != nil {
		return
	}

	bs, err := client.Agent().CPUProfile(opts, nil)
	if err == nil {
		c.writeBytes(path, "profile.prof", bs)
	}

	bs, err = client.Agent().Trace(opts, nil)
	if err == nil {
		c.writeBytes(path, "trace.prof", bs)
	}

	// Binary goroutine profile (debug=0), for use with `go tool pprof`.
	bs, err = client.Agent().Lookup("goroutine", opts, nil)
	if err == nil {
		c.writeBytes(path, "goroutine.prof", bs)
	}

	// Gather goroutine text output - debug type 1
	// debug type 1 writes the legacy text format for human readable output
	opts.Debug = 1
	bs, err = client.Agent().Lookup("goroutine", opts, nil)
	if err == nil {
		c.writeBytes(path, "goroutine-debug1.txt", bs)
	}

	// Gather goroutine text output - debug type 2
	// When printing the "goroutine" profile, debug=2 means to print the goroutine
	// stacks in the same form that a Go program uses when dying due to an unrecovered panic.
	opts.Debug = 2
	bs, err = client.Agent().Lookup("goroutine", opts, nil)
	if err == nil {
		c.writeBytes(path, "goroutine-debug2.txt", bs)
	}
}
|
|
|
|
|
|
|
|
// collectPeriodic runs for duration, capturing the cluster state every interval. It flushes and stops
// the monitor requests by canceling the shared context when the duration elapses.
func (c *OperatorDebugCommand) collectPeriodic(client *api.Client) {
	duration := time.After(c.duration)
	// Set interval to 0 so that we immediately execute, wait the interval next time
	interval := time.After(0 * time.Second)
	var intervalCount int
	var name, dir string

	for {
		select {
		case <-duration:
			// duration elapsed: cancel the context to stop the monitors too
			c.cancel()
			return

		case <-interval:
			// each snapshot goes into nomad/<zero-padded counter>/
			name = fmt.Sprintf("%04d", intervalCount)
			dir = filepath.Join("nomad", name)
			c.Ui.Output(fmt.Sprintf("    Capture interval %s", name))
			c.collectNomad(dir, client)
			c.collectOperator(dir, client)
			// re-arm the timer for the next snapshot
			interval = time.After(c.interval)
			intervalCount += 1

		case <-c.ctx.Done():
			// canceled externally (signal or duration): stop snapshotting
			return
		}
	}
}
|
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
// collectOperator captures some cluster meta information: Raft configuration,
// scheduler configuration, autopilot health, and the enterprise license.
// Query errors are recorded into the corresponding file by writeJSON.
func (c *OperatorDebugCommand) collectOperator(dir string, client *api.Client) {
	rc, err := client.Operator().RaftGetConfiguration(nil)
	c.writeJSON(dir, "operator-raft.json", rc, err)

	sc, _, err := client.Operator().SchedulerGetConfiguration(nil)
	c.writeJSON(dir, "operator-scheduler.json", sc, err)

	ah, _, err := client.Operator().AutopilotServerHealth(nil)
	c.writeJSON(dir, "operator-autopilot-health.json", ah, err)

	lic, _, err := client.Operator().LicenseGet(nil)
	c.writeJSON(dir, "license.json", lic, err)
}
|
|
|
|
|
2020-06-25 16:51:23 +00:00
|
|
|
// collectNomad captures the nomad cluster state: jobs, deployments,
// evaluations, allocations, nodes, CSI plugins/volumes, and agent metrics.
// Per-endpoint query errors are recorded into the bundle by writeJSON; only a
// failure to create the snapshot directory aborts.
func (c *OperatorDebugCommand) collectNomad(dir string, client *api.Client) error {
	err := c.mkdir(dir)
	if err != nil {
		return err
	}

	// nil QueryOptions uses the client's defaults for every list call below
	var qo *api.QueryOptions

	js, _, err := client.Jobs().List(qo)
	c.writeJSON(dir, "jobs.json", js, err)

	ds, _, err := client.Deployments().List(qo)
	c.writeJSON(dir, "deployments.json", ds, err)

	es, _, err := client.Evaluations().List(qo)
	c.writeJSON(dir, "evaluations.json", es, err)

	as, _, err := client.Allocations().List(qo)
	c.writeJSON(dir, "allocations.json", as, err)

	ns, _, err := client.Nodes().List(qo)
	c.writeJSON(dir, "nodes.json", ns, err)

	ps, _, err := client.CSIPlugins().List(qo)
	c.writeJSON(dir, "plugins.json", ps, err)

	vs, _, err := client.CSIVolumes().List(qo)
	c.writeJSON(dir, "volumes.json", vs, err)

	// Metrics are returned pre-serialized, so write raw bytes rather than
	// re-marshaling; errors are recorded in the same file slot.
	if metricBytes, err := client.Operator().Metrics(qo); err != nil {
		c.writeError(dir, "metrics.json", err)
	} else {
		c.writeBytes(dir, "metrics.json", metricBytes)
	}

	return nil
}
|
|
|
|
|
|
|
|
// collectConsul calls the Consul API directly to collect data: agent self
// configuration and cluster members. An empty resolved address (no flag, env
// var, or agent-config value) skips collection entirely. Always returns nil;
// request failures are recorded into the bundle by writeBody.
func (c *OperatorDebugCommand) collectConsul(dir, consul string) error {
	addr := c.consul.addr(consul)
	if addr == "" {
		return nil
	}

	// Plain HTTP client with the user-supplied Consul TLS settings applied.
	client := defaultHttpClient()
	api.ConfigureTLS(client, c.consul.tls)

	req, _ := http.NewRequest("GET", addr+"/v1/agent/self", nil)
	req.Header.Add("X-Consul-Token", c.consul.token())
	req.Header.Add("User-Agent", userAgent)
	resp, err := client.Do(req)
	c.writeBody(dir, "consul-agent-self.json", resp, err)

	req, _ = http.NewRequest("GET", addr+"/v1/agent/members", nil)
	req.Header.Add("X-Consul-Token", c.consul.token())
	req.Header.Add("User-Agent", userAgent)
	resp, err = client.Do(req)
	c.writeBody(dir, "consul-agent-members.json", resp, err)

	return nil
}
|
|
|
|
|
|
|
|
// collectVault calls the Vault API directly to collect data
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) collectVault(dir, vault string) error {
|
2020-08-11 17:14:28 +00:00
|
|
|
addr := c.vault.addr(vault)
|
|
|
|
if addr == "" {
|
2020-06-25 16:51:23 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
client := defaultHttpClient()
|
|
|
|
api.ConfigureTLS(client, c.vault.tls)
|
2020-06-25 16:51:23 +00:00
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
req, _ := http.NewRequest("GET", addr+"/sys/health", nil)
|
|
|
|
req.Header.Add("X-Vault-Token", c.vault.token())
|
2020-06-25 16:51:23 +00:00
|
|
|
req.Header.Add("User-Agent", userAgent)
|
|
|
|
resp, err := client.Do(req)
|
|
|
|
c.writeBody(dir, "vault-sys-health.json", resp, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeBytes writes a file to the archive, recording it in the manifest.
// Parent directories under the collection root are created as needed.
func (c *OperatorDebugCommand) writeBytes(dir, file string, data []byte) error {
	// manifest entries are relative to the bundle root
	relativePath := filepath.Join(dir, file)
	c.manifest = append(c.manifest, relativePath)
	dirPath := filepath.Join(c.collectDir, dir)
	filePath := filepath.Join(dirPath, file)

	// Ensure parent directories exist
	err := os.MkdirAll(dirPath, os.ModePerm)
	if err != nil {
		// Display error immediately -- may not see this if files aren't written
		c.Ui.Error(fmt.Sprintf("failed to create parent directories of \"%s\": %s", dirPath, err.Error()))
		return err
	}

	// Create the file
	fh, err := os.Create(filePath)
	if err != nil {
		// Display error immediately -- may not see this if files aren't written
		c.Ui.Error(fmt.Sprintf("failed to create file \"%s\": %s", filePath, err.Error()))
		return err
	}
	defer fh.Close()

	_, err = fh.Write(data)
	return err
}
|
|
|
|
|
|
|
|
// writeJSON writes JSON responses from the Nomad API calls to the archive
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) writeJSON(dir, file string, data interface{}, err error) error {
|
2020-08-11 17:14:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return c.writeError(dir, file, err)
|
|
|
|
}
|
2020-06-25 16:51:23 +00:00
|
|
|
bytes, err := json.Marshal(data)
|
2020-08-11 17:14:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return c.writeError(dir, file, err)
|
|
|
|
}
|
|
|
|
return c.writeBytes(dir, file, bytes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeError writes a JSON error object to capture errors in the debug bundle without
// reporting them to the user. Returns an error only if the wrapper itself
// cannot be marshaled or written.
func (c *OperatorDebugCommand) writeError(dir, file string, err error) error {
	// err is intentionally reused: the incoming error is consumed on the
	// right-hand side before the marshal result shadows it.
	bytes, err := json.Marshal(errorWrapper{Error: err.Error()})
	if err != nil {
		return err
	}
	return c.writeBytes(dir, file, bytes)
}
|
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
// errorWrapper is the JSON shape used to persist an error message in the
// bundle in place of the data it replaced.
type errorWrapper struct {
	Error string
}
|
|
|
|
|
2020-06-25 16:51:23 +00:00
|
|
|
// writeBody is a helper that writes the body of an http.Response to the archive
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) writeBody(dir, file string, resp *http.Response, err error) {
|
2020-06-25 16:51:23 +00:00
|
|
|
if err != nil {
|
2020-08-11 17:14:28 +00:00
|
|
|
c.writeError(dir, file, err)
|
2020-06-25 16:51:23 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp.ContentLength == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-11 17:14:28 +00:00
|
|
|
defer resp.Body.Close()
|
|
|
|
|
2020-06-25 16:51:23 +00:00
|
|
|
body, err := ioutil.ReadAll(resp.Body)
|
|
|
|
if err != nil {
|
2020-08-11 17:14:28 +00:00
|
|
|
c.writeError(dir, file, err)
|
2020-06-25 16:51:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
c.writeBytes(dir, file, body)
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeManifest creates the index files
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) writeManifest() error {
|
2020-06-25 16:51:23 +00:00
|
|
|
// Write the JSON
|
|
|
|
path := filepath.Join(c.collectDir, "index.json")
|
|
|
|
jsonFh, err := os.Create(path)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer jsonFh.Close()
|
|
|
|
|
|
|
|
json.NewEncoder(jsonFh).Encode(c.manifest)
|
|
|
|
|
|
|
|
// Write the HTML
|
|
|
|
path = filepath.Join(c.collectDir, "index.html")
|
|
|
|
htmlFh, err := os.Create(path)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer htmlFh.Close()
|
|
|
|
|
|
|
|
head, _ := template.New("head").Parse("<html><head><title>{{.}}</title></head>\n<body><h1>{{.}}</h1>\n<ul>")
|
|
|
|
line, _ := template.New("line").Parse("<li><a href=\"{{.}}\">{{.}}</a></li>\n")
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("%v", err)
|
|
|
|
}
|
|
|
|
tail := "</ul></body></html>\n"
|
|
|
|
|
|
|
|
head.Execute(htmlFh, c.timestamp)
|
|
|
|
for _, f := range c.manifest {
|
|
|
|
line.Execute(htmlFh, f)
|
|
|
|
}
|
|
|
|
htmlFh.WriteString(tail)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// trap captures signals, and closes stopCh
|
2020-08-11 19:39:44 +00:00
|
|
|
func (c *OperatorDebugCommand) trap() {
|
2020-06-25 16:51:23 +00:00
|
|
|
sigCh := make(chan os.Signal, 1)
|
|
|
|
signal.Notify(sigCh,
|
|
|
|
syscall.SIGHUP,
|
|
|
|
syscall.SIGINT,
|
|
|
|
syscall.SIGTERM,
|
|
|
|
syscall.SIGQUIT)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
<-sigCh
|
|
|
|
c.cancel()
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// TarCZF, like the tar command, recursively builds a gzip compressed tar archive from a
|
2020-08-11 17:14:28 +00:00
|
|
|
// directory. If not empty, all files in the bundle are prefixed with the target path
|
|
|
|
func TarCZF(archive string, src, target string) error {
|
2020-06-25 16:51:23 +00:00
|
|
|
// ensure the src actually exists before trying to tar it
|
|
|
|
if _, err := os.Stat(src); err != nil {
|
|
|
|
return fmt.Errorf("Unable to tar files - %v", err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
// create the archive
|
|
|
|
fh, err := os.Create(archive)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer fh.Close()
|
|
|
|
|
|
|
|
zz := gzip.NewWriter(fh)
|
|
|
|
defer zz.Close()
|
|
|
|
|
|
|
|
tw := tar.NewWriter(zz)
|
|
|
|
defer tw.Close()
|
|
|
|
|
|
|
|
// tar
|
|
|
|
return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error {
|
|
|
|
|
|
|
|
// return on any error
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !fi.Mode().IsRegular() {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
header, err := tar.FileInfoHeader(fi, fi.Name())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// remove leading path to the src, so files are relative to the archive
|
2020-08-11 17:14:28 +00:00
|
|
|
path := strings.Replace(file, src, "", -1)
|
|
|
|
if target != "" {
|
|
|
|
path = filepath.Join([]string{target, path}...)
|
|
|
|
}
|
|
|
|
path = strings.TrimPrefix(path, string(filepath.Separator))
|
|
|
|
|
|
|
|
header.Name = path
|
2020-06-25 16:51:23 +00:00
|
|
|
|
|
|
|
if err := tw.WriteHeader(header); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// copy the file contents
|
|
|
|
f, err := os.Open(file)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := io.Copy(tw, f); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
f.Close()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// argNodes splits a comma separated list of node ids from the command line,
// trimming whitespace and dropping empty entries.
func argNodes(input string) []string {
	var ids []string
	for _, raw := range strings.Split(input, ",") {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			ids = append(ids, trimmed)
		}
	}
	return ids
}
|
2020-08-11 17:14:28 +00:00
|
|
|
|
|
|
|
// external holds address configuration for Consul and Vault APIs.
type external struct {
	// tls carries client cert/key and CA settings for HTTPS endpoints.
	tls *api.TLSConfig
	// addrVal is the flag- or environment-supplied address, if any.
	addrVal string
	// auth is populated from CONSUL_HTTP_AUTH / -consul-auth.
	// NOTE(review): not read anywhere in this file -- presumably HTTP basic
	// auth credentials; confirm before documenting further.
	auth string
	// ssl forces an https scheme onto addrVal when it has no scheme.
	ssl bool
	// tokenVal is an explicit API token; it takes precedence over tokenFile.
	tokenVal string
	// tokenFile is the path to a file containing the API token.
	tokenFile string
}
|
|
|
|
|
|
|
|
func (e *external) addr(defaultAddr string) string {
|
|
|
|
if e.addrVal == "" {
|
|
|
|
return defaultAddr
|
|
|
|
}
|
|
|
|
|
|
|
|
if !e.ssl {
|
|
|
|
if strings.HasPrefix(e.addrVal, "http:") {
|
|
|
|
return e.addrVal
|
|
|
|
}
|
|
|
|
return "http://" + e.addrVal
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.HasPrefix(e.addrVal, "https:") {
|
|
|
|
return e.addrVal
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.HasPrefix(e.addrVal, "http:") {
|
|
|
|
return "https:" + e.addrVal[5:]
|
|
|
|
}
|
|
|
|
|
|
|
|
return "https://" + e.addrVal
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *external) token() string {
|
|
|
|
if e.tokenVal != "" {
|
|
|
|
return e.tokenVal
|
|
|
|
}
|
|
|
|
|
|
|
|
if e.tokenFile != "" {
|
|
|
|
bs, err := ioutil.ReadFile(e.tokenFile)
|
|
|
|
if err == nil {
|
|
|
|
return strings.TrimSpace(string(bs))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
// defaultHttpClient configures a basic httpClient for direct Consul/Vault
// requests: a cleanhttp client with a bounded TLS handshake timeout and a
// TLS 1.2 minimum version.
func defaultHttpClient() *http.Client {
	httpClient := cleanhttp.DefaultClient()
	// cleanhttp.DefaultClient always uses an *http.Transport, so this
	// assertion is safe
	transport := httpClient.Transport.(*http.Transport)
	transport.TLSHandshakeTimeout = 10 * time.Second
	transport.TLSClientConfig = &tls.Config{
		MinVersion: tls.VersionTLS12,
	}

	return httpClient
}
|