2013-12-19 20:18:06 +00:00
|
|
|
package agent
|
|
|
|
|
|
|
|
import (
|
2013-12-21 00:39:32 +00:00
|
|
|
"flag"
|
2013-12-20 01:14:46 +00:00
|
|
|
"fmt"
|
2013-12-21 00:39:32 +00:00
|
|
|
"io"
|
2013-12-30 23:27:41 +00:00
|
|
|
"net"
|
2013-12-21 00:39:32 +00:00
|
|
|
"os"
|
|
|
|
"os/signal"
|
2014-09-02 21:23:43 +00:00
|
|
|
"path/filepath"
|
2014-06-09 18:57:15 +00:00
|
|
|
"regexp"
|
2014-02-23 01:43:12 +00:00
|
|
|
"runtime"
|
2013-12-19 20:18:06 +00:00
|
|
|
"strings"
|
2013-12-21 00:39:32 +00:00
|
|
|
"syscall"
|
|
|
|
"time"
|
2014-06-16 21:36:12 +00:00
|
|
|
|
|
|
|
"github.com/armon/go-metrics"
|
2014-08-21 20:09:13 +00:00
|
|
|
"github.com/hashicorp/consul/watch"
|
2014-09-02 21:23:43 +00:00
|
|
|
"github.com/hashicorp/go-checkpoint"
|
2014-06-16 21:36:12 +00:00
|
|
|
"github.com/hashicorp/go-syslog"
|
|
|
|
"github.com/hashicorp/logutils"
|
|
|
|
"github.com/mitchellh/cli"
|
2013-12-19 20:18:06 +00:00
|
|
|
)
|
|
|
|
|
2013-12-21 00:39:32 +00:00
|
|
|
// gracefulTimeout controls how long we wait before forcefully terminating
// once a graceful leave has been started (see handleSignals).
var gracefulTimeout = 5 * time.Second
|
|
|
|
|
2014-06-09 18:57:15 +00:00
|
|
|
// validDatacenter is used to validate a datacenter name: one or more
// letters, digits, underscores, or hyphens.
var validDatacenter = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
|
|
|
|
|
2014-01-02 23:10:13 +00:00
|
|
|
// Command is a Command implementation that runs a Consul agent.
// The command will not end unless a shutdown message is sent on the
// ShutdownCh. If two messages are sent on the ShutdownCh it will forcibly
// exit.
type Command struct {
	Revision          string          // VCS revision of the build, copied into Config
	Version           string          // Consul version string, copied into Config
	VersionPrerelease string          // pre-release tag, copied into Config
	Ui                cli.Ui          // sink for all command output
	ShutdownCh        <-chan struct{} // first message starts shutdown; second forces exit
	args              []string        // raw CLI args, kept so readConfig can re-parse on reload
	logFilter         *logutils.LevelFilter // level filter applied to all log output
	logOutput         io.Writer       // combined log destination set up by setupLoggers
	agent             *Agent          // the running agent, created by setupAgent
	rpcServer         *AgentRPC       // agent RPC (IPC) endpoint
	httpServer        *HTTPServer     // HTTP API endpoint; nil when Ports.HTTP <= 0
	dnsServer         *DNSServer      // DNS endpoint; nil when Ports.DNS <= 0
}
|
|
|
|
|
|
|
|
// readConfig is responsible for setup of our configuration using
|
|
|
|
// the command line and any file configs
|
|
|
|
func (c *Command) readConfig() *Config {
|
|
|
|
var cmdConfig Config
|
|
|
|
var configFiles []string
|
2014-10-12 17:50:15 +00:00
|
|
|
var retryInterval string
|
2014-11-14 15:02:42 +00:00
|
|
|
var retryWanInterval string
|
2013-12-21 00:39:32 +00:00
|
|
|
cmdFlags := flag.NewFlagSet("agent", flag.ContinueOnError)
|
|
|
|
cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
|
2014-04-11 22:22:35 +00:00
|
|
|
|
|
|
|
cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-file", "json file to read config from")
|
|
|
|
cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-dir", "directory of json files to read")
|
|
|
|
|
2013-12-21 00:39:32 +00:00
|
|
|
cmdFlags.StringVar(&cmdConfig.LogLevel, "log-level", "", "log level")
|
|
|
|
cmdFlags.StringVar(&cmdConfig.NodeName, "node", "", "node name")
|
|
|
|
cmdFlags.StringVar(&cmdConfig.Datacenter, "dc", "", "node datacenter")
|
2014-04-11 22:22:35 +00:00
|
|
|
cmdFlags.StringVar(&cmdConfig.DataDir, "data-dir", "", "path to the data directory")
|
2014-04-23 19:37:53 +00:00
|
|
|
cmdFlags.StringVar(&cmdConfig.UiDir, "ui-dir", "", "path to the web UI directory")
|
2014-05-06 03:29:50 +00:00
|
|
|
cmdFlags.StringVar(&cmdConfig.PidFile, "pid-file", "", "path to file to store PID")
|
2014-08-22 22:08:15 +00:00
|
|
|
cmdFlags.StringVar(&cmdConfig.EncryptKey, "encrypt", "", "gossip encryption key")
|
2014-04-11 22:22:35 +00:00
|
|
|
|
2013-12-25 00:48:07 +00:00
|
|
|
cmdFlags.BoolVar(&cmdConfig.Server, "server", false, "run agent as server")
|
|
|
|
cmdFlags.BoolVar(&cmdConfig.Bootstrap, "bootstrap", false, "enable server bootstrap mode")
|
2014-06-20 00:08:48 +00:00
|
|
|
cmdFlags.IntVar(&cmdConfig.BootstrapExpect, "bootstrap-expect", 0, "enable automatic bootstrap via expect mode")
|
2014-04-11 22:22:35 +00:00
|
|
|
|
|
|
|
cmdFlags.StringVar(&cmdConfig.ClientAddr, "client", "", "address to bind client listeners to (DNS, HTTP, RPC)")
|
|
|
|
cmdFlags.StringVar(&cmdConfig.BindAddr, "bind", "", "address to bind server listeners to")
|
2014-05-16 17:49:36 +00:00
|
|
|
cmdFlags.StringVar(&cmdConfig.AdvertiseAddr, "advertise", "", "address to advertise instead of bind addr")
|
2014-04-11 22:22:35 +00:00
|
|
|
|
2014-03-09 22:57:03 +00:00
|
|
|
cmdFlags.IntVar(&cmdConfig.Protocol, "protocol", -1, "protocol version")
|
2014-04-11 22:22:35 +00:00
|
|
|
|
2014-05-21 19:06:03 +00:00
|
|
|
cmdFlags.BoolVar(&cmdConfig.EnableSyslog, "syslog", false,
|
|
|
|
"enable logging to syslog facility")
|
2014-05-21 19:32:24 +00:00
|
|
|
cmdFlags.BoolVar(&cmdConfig.RejoinAfterLeave, "rejoin", false,
|
|
|
|
"enable re-joining after a previous leave")
|
2014-04-11 23:59:16 +00:00
|
|
|
cmdFlags.Var((*AppendSliceValue)(&cmdConfig.StartJoin), "join",
|
|
|
|
"address of agent to join on startup")
|
2014-11-14 15:02:42 +00:00
|
|
|
cmdFlags.Var((*AppendSliceValue)(&cmdConfig.StartWanJoin), "join-wan",
|
|
|
|
"address of agent to join -wan on startup")
|
2014-10-12 17:50:15 +00:00
|
|
|
cmdFlags.Var((*AppendSliceValue)(&cmdConfig.RetryJoin), "retry-join",
|
|
|
|
"address of agent to join on startup with retry")
|
|
|
|
cmdFlags.IntVar(&cmdConfig.RetryMaxAttempts, "retry-max", 0,
|
|
|
|
"number of retries for joining")
|
|
|
|
cmdFlags.StringVar(&retryInterval, "retry-interval", "",
|
|
|
|
"interval between join attempts")
|
2014-11-14 15:02:42 +00:00
|
|
|
cmdFlags.Var((*AppendSliceValue)(&cmdConfig.RetryWanJoin), "retry-wan-join",
|
|
|
|
"address of agent to join -wan on startup with retry")
|
|
|
|
cmdFlags.IntVar(&cmdConfig.RetryWanMaxAttempts, "retry-wan-max", 0,
|
|
|
|
"number of retries for joining -wan")
|
|
|
|
cmdFlags.StringVar(&retryWanInterval, "retry-wan-interval", "",
|
|
|
|
"interval between join -wan attempts")
|
2014-04-11 23:59:16 +00:00
|
|
|
|
2013-12-21 00:39:32 +00:00
|
|
|
if err := cmdFlags.Parse(c.args); err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-10-12 17:50:15 +00:00
|
|
|
if retryInterval != "" {
|
|
|
|
dur, err := time.ParseDuration(retryInterval)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error: %s", err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
cmdConfig.RetryInterval = dur
|
|
|
|
}
|
|
|
|
|
2014-11-14 15:02:42 +00:00
|
|
|
if retryWanInterval != "" {
|
|
|
|
dur, err := time.ParseDuration(retryWanInterval)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error: %s", err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
cmdConfig.RetryWanInterval = dur
|
|
|
|
}
|
|
|
|
|
2013-12-21 00:39:32 +00:00
|
|
|
config := DefaultConfig()
|
|
|
|
if len(configFiles) > 0 {
|
|
|
|
fileConfig, err := ReadConfigPaths(configFiles)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(err.Error())
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
config = MergeConfig(config, fileConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
config = MergeConfig(config, &cmdConfig)
|
|
|
|
|
|
|
|
if config.NodeName == "" {
|
|
|
|
hostname, err := os.Hostname()
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error determining hostname: %s", err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
config.NodeName = hostname
|
|
|
|
}
|
|
|
|
|
|
|
|
if config.EncryptKey != "" {
|
|
|
|
if _, err := config.EncryptBytes(); err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Invalid encryption key: %s", err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-23 01:34:57 +00:00
|
|
|
// Ensure we have a data directory
|
|
|
|
if config.DataDir == "" {
|
|
|
|
c.Ui.Error("Must specify data directory using -data-dir")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-06-09 18:57:15 +00:00
|
|
|
// Verify data center is valid
|
|
|
|
if !validDatacenter.MatchString(config.Datacenter) {
|
|
|
|
c.Ui.Error("Datacenter must be alpha-numeric with underscores and hypens only")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-02-23 02:58:15 +00:00
|
|
|
// Only allow bootstrap mode when acting as a server
|
|
|
|
if config.Bootstrap && !config.Server {
|
|
|
|
c.Ui.Error("Bootstrap mode cannot be enabled when server mode is not enabled")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-06-16 21:36:12 +00:00
|
|
|
// Expect can only work when acting as a server
|
2014-06-20 00:08:48 +00:00
|
|
|
if config.BootstrapExpect != 0 && !config.Server {
|
2014-06-16 21:36:12 +00:00
|
|
|
c.Ui.Error("Expect mode cannot be enabled when server mode is not enabled")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expect & Bootstrap are mutually exclusive
|
2014-06-20 00:08:48 +00:00
|
|
|
if config.BootstrapExpect != 0 && config.Bootstrap {
|
2014-06-18 16:03:30 +00:00
|
|
|
c.Ui.Error("Bootstrap cannot be provided with an expected server count")
|
2014-06-16 21:36:12 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-08-21 20:09:13 +00:00
|
|
|
// Compile all the watches
|
|
|
|
for _, params := range config.Watches {
|
|
|
|
// Parse the watches, excluding the handler
|
|
|
|
wp, err := watch.ParseExempt(params, []string{"handler"})
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Failed to parse watch (%#v): %v", params, err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the handler
|
|
|
|
if err := verifyWatchHandler(wp.Exempt["handler"]); err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Failed to setup watch handler (%#v): %v", params, err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store the watch plan
|
|
|
|
config.WatchPlans = append(config.WatchPlans, wp)
|
|
|
|
}
|
|
|
|
|
2014-06-16 21:36:12 +00:00
|
|
|
// Warn if we are in expect mode
|
2014-06-20 00:08:48 +00:00
|
|
|
if config.BootstrapExpect == 1 {
|
|
|
|
c.Ui.Error("WARNING: BootstrapExpect Mode is specified as 1; this is the same as Bootstrap mode.")
|
|
|
|
config.BootstrapExpect = 0
|
|
|
|
config.Bootstrap = true
|
|
|
|
} else if config.BootstrapExpect > 0 {
|
|
|
|
c.Ui.Error(fmt.Sprintf("WARNING: Expect Mode enabled, expecting %d servers", config.BootstrapExpect))
|
2014-06-16 21:36:12 +00:00
|
|
|
}
|
|
|
|
|
2014-02-23 02:58:15 +00:00
|
|
|
// Warn if we are in bootstrap mode
|
|
|
|
if config.Bootstrap {
|
|
|
|
c.Ui.Error("WARNING: Bootstrap mode enabled! Do not enable unless necessary")
|
|
|
|
}
|
|
|
|
|
2014-04-14 23:49:43 +00:00
|
|
|
// Warn if using windows as a server
|
|
|
|
if config.Server && runtime.GOOS == "windows" {
|
|
|
|
c.Ui.Error("WARNING: Windows is not recommended as a Consul server. Do not use in production.")
|
|
|
|
}
|
|
|
|
|
2014-06-06 21:40:22 +00:00
|
|
|
// Set the version info
|
|
|
|
config.Revision = c.Revision
|
|
|
|
config.Version = c.Version
|
|
|
|
config.VersionPrerelease = c.VersionPrerelease
|
|
|
|
|
2013-12-21 00:39:32 +00:00
|
|
|
return config
|
|
|
|
}
|
|
|
|
|
|
|
|
// setupLoggers is used to setup the logGate, logWriter, and our logOutput.
// On failure an error is printed to the UI and all three results are nil;
// callers detect failure by checking for a nil logWriter.
func (c *Command) setupLoggers(config *Config) (*GatedWriter, *logWriter, io.Writer) {
	// Setup logging. First create the gated log writer, which will
	// store logs until we're ready to show them. Then create the level
	// filter, filtering logs of the specified level.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{Ui: c.Ui},
	}

	c.logFilter = LevelFilter()
	c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
	c.logFilter.Writer = logGate
	if !ValidateLevelFilter(c.logFilter.MinLevel, c.logFilter) {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			c.logFilter.MinLevel, c.logFilter.Levels))
		return nil, nil, nil
	}

	// Check if syslog is enabled
	var syslog io.Writer
	if config.EnableSyslog {
		l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "consul")
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Syslog setup failed: %v", err))
			return nil, nil, nil
		}
		// Wrap so syslog also respects the configured level filter.
		syslog = &SyslogWrapper{l, c.logFilter}
	}

	// Create a log writer, and wrap a logOutput around it
	logWriter := NewLogWriter(512)
	var logOutput io.Writer
	if syslog != nil {
		logOutput = io.MultiWriter(c.logFilter, logWriter, syslog)
	} else {
		logOutput = io.MultiWriter(c.logFilter, logWriter)
	}
	// Remember the combined output so later code (e.g. watches) can reuse it.
	c.logOutput = logOutput
	return logGate, logWriter, logOutput
}
|
|
|
|
|
2013-12-23 19:38:51 +00:00
|
|
|
// setupAgent is used to start the agent and various interfaces: the RPC
// (IPC) listener is always started; the HTTP and DNS servers only when
// their ports are configured (> 0); and the periodic update check unless
// disabled. On failure the partially-started agent is shut down and the
// error is returned after being printed to the UI.
func (c *Command) setupAgent(config *Config, logOutput io.Writer, logWriter *logWriter) error {
	c.Ui.Output("Starting Consul agent...")
	agent, err := Create(config, logOutput)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error starting agent: %s", err))
		return err
	}
	c.agent = agent

	// Setup the RPC listener
	rpcAddr, err := config.ClientListener(config.Addresses.RPC, config.Ports.RPC)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Invalid RPC bind address: %s", err))
		return err
	}

	rpcListener, err := net.Listen("tcp", rpcAddr.String())
	if err != nil {
		// Undo agent creation since we cannot serve RPC.
		agent.Shutdown()
		c.Ui.Error(fmt.Sprintf("Error starting RPC listener: %s", err))
		return err
	}

	// Start the IPC layer
	c.Ui.Output("Starting Consul agent RPC...")
	c.rpcServer = NewAgentRPC(agent, rpcListener, logOutput, logWriter)

	// HTTP API is optional; enabled only when a port is configured.
	if config.Ports.HTTP > 0 {
		httpAddr, err := config.ClientListener(config.Addresses.HTTP, config.Ports.HTTP)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Invalid HTTP bind address: %s", err))
			return err
		}

		server, err := NewHTTPServer(agent, config.UiDir, config.EnableDebug, logOutput, httpAddr.String())
		if err != nil {
			agent.Shutdown()
			c.Ui.Error(fmt.Sprintf("Error starting http server: %s", err))
			return err
		}
		c.httpServer = server
	}

	// DNS endpoint is likewise optional.
	if config.Ports.DNS > 0 {
		dnsAddr, err := config.ClientListener(config.Addresses.DNS, config.Ports.DNS)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Invalid DNS bind address: %s", err))
			return err
		}

		server, err := NewDNSServer(agent, &config.DNSConfig, logOutput,
			config.Domain, dnsAddr.String(), config.DNSRecursors)
		if err != nil {
			agent.Shutdown()
			c.Ui.Error(fmt.Sprintf("Error starting dns server: %s", err))
			return err
		}
		c.dnsServer = server
	}

	// Setup update checking
	if !config.DisableUpdateCheck {
		updateParams := &checkpoint.CheckParams{
			Product: "consul",
			Version: fmt.Sprintf("%s%s", config.Version, config.VersionPrerelease),
		}
		if !config.DisableAnonymousSignature {
			updateParams.SignatureFile = filepath.Join(config.DataDir, "checkpoint-signature")
		}

		// Schedule a periodic check with expected interval of 24 hours
		checkpoint.CheckInterval(updateParams, 24*time.Hour, c.checkpointResults)

		// Do an immediate check within the next 30 seconds
		go func() {
			time.Sleep(randomStagger(30 * time.Second))
			c.checkpointResults(checkpoint.Check(updateParams))
		}()
	}
	return nil
}
|
|
|
|
|
2014-09-02 21:23:43 +00:00
|
|
|
// checkpointResults is used to handler periodic results from our update checker
|
|
|
|
func (c *Command) checkpointResults(results *checkpoint.CheckResponse, err error) {
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Failed to check for updates: %v", err))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if results.Outdated {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Newer Consul version available: %s", results.CurrentVersion))
|
|
|
|
}
|
|
|
|
for _, alert := range results.Alerts {
|
|
|
|
switch alert.Level {
|
|
|
|
case "info":
|
|
|
|
c.Ui.Info(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL))
|
|
|
|
default:
|
|
|
|
c.Ui.Error(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-11 23:59:16 +00:00
|
|
|
// startupJoin is invoked to handle any joins specified to take place at start time
|
|
|
|
func (c *Command) startupJoin(config *Config) error {
|
|
|
|
if len(config.StartJoin) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Output("Joining cluster...")
|
|
|
|
n, err := c.agent.JoinLAN(config.StartJoin)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Info(fmt.Sprintf("Join completed. Synced with %d initial agents", n))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-11-14 15:02:42 +00:00
|
|
|
// startupWanJoin is invoked to handle any joins -wan specified to take place at start time
|
|
|
|
func (c *Command) startupWanJoin(config *Config) error {
|
|
|
|
if len(config.StartWanJoin) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Output("Joining -wan cluster...")
|
|
|
|
n, err := c.agent.JoinWAN(config.StartWanJoin)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Info(fmt.Sprintf("Join -wan completed. Synced with %d initial agents", n))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-10-12 19:45:40 +00:00
|
|
|
// retryJoin is used to handle retrying a join until it succeeds or all
|
|
|
|
// retries are exhausted.
|
2014-10-12 17:50:15 +00:00
|
|
|
func (c *Command) retryJoin(config *Config, errCh chan<- struct{}) {
|
|
|
|
if len(config.RetryJoin) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
logger := c.agent.logger
|
|
|
|
logger.Printf("[INFO] agent: Joining cluster...")
|
|
|
|
|
|
|
|
attempt := 0
|
|
|
|
for {
|
|
|
|
n, err := c.agent.JoinLAN(config.RetryJoin)
|
|
|
|
if err == nil {
|
|
|
|
logger.Printf("[INFO] agent: Join completed. Synced with %d initial agents", n)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
attempt++
|
|
|
|
if config.RetryMaxAttempts > 0 && attempt > config.RetryMaxAttempts {
|
|
|
|
logger.Printf("[ERROR] agent: max join retry exhausted, exiting")
|
|
|
|
close(errCh)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Printf("[WARN] agent: Join failed: %v, retrying in %v", err,
|
|
|
|
config.RetryInterval)
|
|
|
|
time.Sleep(config.RetryInterval)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-14 15:02:42 +00:00
|
|
|
// retryWanJoin is used to handle retrying a join -wan until it succeeds or all
|
|
|
|
// retries are exhausted.
|
|
|
|
func (c *Command) retryWanJoin(config *Config, errCh chan<- struct{}) {
|
|
|
|
if len(config.RetryWanJoin) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
logger := c.agent.logger
|
|
|
|
logger.Printf("[INFO] agent: Joining WAN cluster...")
|
|
|
|
|
|
|
|
attempt := 0
|
|
|
|
for {
|
|
|
|
n, err := c.agent.JoinWAN(config.RetryWanJoin)
|
|
|
|
if err == nil {
|
|
|
|
logger.Printf("[INFO] agent: Join -wan completed. Synced with %d initial agents", n)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
attempt++
|
|
|
|
if config.RetryWanMaxAttempts > 0 && attempt > config.RetryWanMaxAttempts {
|
|
|
|
logger.Printf("[ERROR] agent: max join -wan retry exhausted, exiting")
|
|
|
|
close(errCh)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Printf("[WARN] agent: Join -wan failed: %v, retrying in %v", err,
|
|
|
|
config.RetryWanInterval)
|
|
|
|
time.Sleep(config.RetryWanInterval)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-19 20:18:06 +00:00
|
|
|
// Run parses configuration, wires up logging and telemetry, starts the
// agent and its network endpoints, registers configured services, checks
// and watches, then blocks in handleSignals until exit. It returns the
// process exit code: 0 on clean shutdown, 1 on any error.
func (c *Command) Run(args []string) int {
	c.Ui = &cli.PrefixedUi{
		OutputPrefix: "==> ",
		InfoPrefix:   "    ",
		ErrorPrefix:  "==> ",
		Ui:           c.Ui,
	}

	// Parse our configs
	c.args = args
	config := c.readConfig()
	if config == nil {
		return 1
	}

	// Check GOMAXPROCS
	if runtime.GOMAXPROCS(0) == 1 {
		c.Ui.Error("WARNING: It is highly recommended to set GOMAXPROCS higher than 1")
	}

	// Setup the log outputs. setupLoggers reports failure by returning
	// all-nil results, hence the nil check below.
	logGate, logWriter, logOutput := c.setupLoggers(config)
	if logWriter == nil {
		return 1
	}

	/* Setup telemetry
	Aggregate on 10 second intervals for 1 minute. Expose the
	metrics over stderr when there is a SIGUSR1 received.
	*/
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)
	metrics.DefaultInmemSignal(inm)
	metricsConf := metrics.DefaultConfig("consul")

	// Configure the statsite sink
	var fanout metrics.FanoutSink
	if config.StatsiteAddr != "" {
		sink, err := metrics.NewStatsiteSink(config.StatsiteAddr)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to start statsite sink. Got: %s", err))
			return 1
		}
		fanout = append(fanout, sink)
	}

	// Configure the statsd sink
	if config.StatsdAddr != "" {
		sink, err := metrics.NewStatsdSink(config.StatsdAddr)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to start statsd sink. Got: %s", err))
			return 1
		}
		fanout = append(fanout, sink)
	}

	// Initialize the global sink
	if len(fanout) > 0 {
		fanout = append(fanout, inm)
		metrics.NewGlobal(metricsConf, fanout)
	} else {
		metricsConf.EnableHostname = false
		metrics.NewGlobal(metricsConf, inm)
	}

	// Create the agent
	if err := c.setupAgent(config, logOutput, logWriter); err != nil {
		return 1
	}
	defer c.agent.Shutdown()
	if c.rpcServer != nil {
		defer c.rpcServer.Shutdown()
	}
	if c.httpServer != nil {
		defer c.httpServer.Shutdown()
	}

	// Join startup nodes if specified
	if err := c.startupJoin(config); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Join startup -wan nodes if specified
	if err := c.startupWanJoin(config); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Register the services
	for _, service := range config.Services {
		ns := service.NodeService()
		chkType := service.CheckType()
		if err := c.agent.AddService(ns, chkType); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to register service '%s': %v", service.Name, err))
			return 1
		}
	}

	// Register the checks
	for _, check := range config.Checks {
		health := check.HealthCheck(config.NodeName)
		chkType := &check.CheckType
		if err := c.agent.AddCheck(health, chkType); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to register check '%s': %v %v", check.Name, err, check))
			return 1
		}
	}

	// Get the new client listener addr
	httpAddr, err := config.ClientListenerAddr(config.Addresses.HTTP, config.Ports.HTTP)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to determine HTTP address: %v", err))
	}

	// Register the watches. Each plan runs in its own goroutine; the
	// parameter shadows wp so every goroutine sees its own plan.
	for _, wp := range config.WatchPlans {
		go func(wp *watch.WatchPlan) {
			wp.Handler = makeWatchHandler(logOutput, wp.Exempt["handler"])
			wp.LogOutput = c.logOutput
			if err := wp.Run(httpAddr); err != nil {
				c.Ui.Error(fmt.Sprintf("Error running watch: %v", err))
			}
		}(wp)
	}

	// Let the agent know we've finished registration
	c.agent.StartSync()

	c.Ui.Output("Consul agent running!")
	c.Ui.Info(fmt.Sprintf("       Node name: '%s'", config.NodeName))
	c.Ui.Info(fmt.Sprintf("      Datacenter: '%s'", config.Datacenter))
	c.Ui.Info(fmt.Sprintf("          Server: %v (bootstrap: %v)", config.Server, config.Bootstrap))
	c.Ui.Info(fmt.Sprintf("     Client Addr: %v (HTTP: %d, DNS: %d, RPC: %d)", config.ClientAddr,
		config.Ports.HTTP, config.Ports.DNS, config.Ports.RPC))
	c.Ui.Info(fmt.Sprintf("    Cluster Addr: %v (LAN: %d, WAN: %d)", config.AdvertiseAddr,
		config.Ports.SerfLan, config.Ports.SerfWan))
	c.Ui.Info(fmt.Sprintf("Gossip encrypt: %v, RPC-TLS: %v, TLS-Incoming: %v",
		config.EncryptKey != "", config.VerifyOutgoing, config.VerifyIncoming))

	// Enable log streaming
	c.Ui.Info("")
	c.Ui.Output("Log data will now stream in as it occurs:\n")
	logGate.Flush()

	// Start retry join process
	errCh := make(chan struct{})
	go c.retryJoin(config, errCh)

	// Start retry -wan join process
	errWanCh := make(chan struct{})
	go c.retryWanJoin(config, errWanCh)

	// Wait for exit
	return c.handleSignals(config, errCh, errWanCh)
}
|
|
|
|
|
|
|
|
// handleSignals blocks until we get an exit-causing signal. SIGHUP (or an
// RPC reload request) triggers a config reload and resumes waiting; a close
// on either retry-join channel means retries were exhausted and is fatal.
// The return value is the process exit code for Run.
func (c *Command) handleSignals(config *Config, retryJoin <-chan struct{}, retryWanJoin <-chan struct{}) int {
	signalCh := make(chan os.Signal, 4)
	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)

	// Wait for a signal
WAIT:
	var sig os.Signal
	select {
	case s := <-signalCh:
		sig = s
	case <-c.rpcServer.ReloadCh():
		// RPC-driven reload is treated exactly like SIGHUP.
		sig = syscall.SIGHUP
	case <-c.ShutdownCh:
		sig = os.Interrupt
	case <-retryJoin:
		return 1
	case <-retryWanJoin:
		return 1
	case <-c.agent.ShutdownCh():
		// Agent is already shutdown!
		return 0
	}
	c.Ui.Output(fmt.Sprintf("Caught signal: %v", sig))

	// Check if this is a SIGHUP
	if sig == syscall.SIGHUP {
		config = c.handleReload(config)
		goto WAIT
	}

	// Check if we should do a graceful leave
	graceful := false
	if sig == os.Interrupt && !config.SkipLeaveOnInt {
		graceful = true
	} else if sig == syscall.SIGTERM && config.LeaveOnTerm {
		graceful = true
	}

	// Bail fast if not doing a graceful leave
	if !graceful {
		return 1
	}

	// Attempt a graceful leave
	gracefulCh := make(chan struct{})
	c.Ui.Output("Gracefully shutting down agent...")
	go func() {
		if err := c.agent.Leave(); err != nil {
			c.Ui.Error(fmt.Sprintf("Error: %s", err))
			return
		}
		close(gracefulCh)
	}()

	// Wait for leave or another signal. A second signal or the
	// gracefulTimeout cutoff aborts the graceful leave.
	select {
	case <-signalCh:
		return 1
	case <-time.After(gracefulTimeout):
		return 1
	case <-gracefulCh:
		return 0
	}
}
|
|
|
|
|
|
|
|
// handleReload is invoked when we should reload our configs, e.g. SIGHUP.
// It re-reads the configuration, adjusts the log level, and swaps the
// registered services, checks and watches from the old config to the new
// one. On a failed re-read the previous config is returned unchanged.
func (c *Command) handleReload(config *Config) *Config {
	c.Ui.Output("Reloading configuration...")
	newConf := c.readConfig()
	if newConf == nil {
		c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
		return config
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = config.LogLevel
	}

	// Bulk update the services and checks
	c.agent.PauseSync()
	defer c.agent.ResumeSync()

	// Deregister the old services
	for _, service := range config.Services {
		ns := service.NodeService()
		c.agent.RemoveService(ns.ID)
	}

	// Deregister the old checks
	for _, check := range config.Checks {
		health := check.HealthCheck(config.NodeName)
		c.agent.RemoveCheck(health.CheckID)
	}

	// Register the services
	for _, service := range newConf.Services {
		ns := service.NodeService()
		chkType := service.CheckType()
		if err := c.agent.AddService(ns, chkType); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to register service '%s': %v", service.Name, err))
		}
	}

	// Register the checks
	for _, check := range newConf.Checks {
		health := check.HealthCheck(config.NodeName)
		chkType := &check.CheckType
		if err := c.agent.AddCheck(health, chkType); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to register check '%s': %v %v", check.Name, err, check))
		}
	}

	// Get the new client listener addr
	httpAddr, err := newConf.ClientListenerAddr(config.Addresses.HTTP, config.Ports.HTTP)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to determine HTTP address: %v", err))
	}

	// Deregister the old watches
	for _, wp := range config.WatchPlans {
		wp.Stop()
	}

	// Register the new watches. The goroutine parameter shadows wp so
	// each goroutine runs its own plan.
	for _, wp := range newConf.WatchPlans {
		go func(wp *watch.WatchPlan) {
			wp.Handler = makeWatchHandler(c.logOutput, wp.Exempt["handler"])
			wp.LogOutput = c.logOutput
			if err := wp.Run(httpAddr); err != nil {
				c.Ui.Error(fmt.Sprintf("Error running watch: %v", err))
			}
		}(wp)
	}

	return newConf
}
|
|
|
|
|
|
|
|
func (c *Command) Synopsis() string {
|
|
|
|
return "Runs a Consul agent"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Help returns the long-form usage text for the agent command, printed
// for "consul agent -h" and by the flag set's Usage on parse errors.
func (c *Command) Help() string {
	helpText := `
Usage: consul agent [options]

  Starts the Consul agent and runs until an interrupt is received. The
  agent represents a single node in a cluster.

Options:

  -advertise=addr          Sets the advertise address to use
  -bootstrap               Sets server to bootstrap mode
  -bind=0.0.0.0            Sets the bind address for cluster communication
  -bootstrap-expect=0      Sets server to expect bootstrap mode.
  -client=127.0.0.1        Sets the address to bind for client access.
                           This includes RPC, DNS and HTTP
  -config-file=foo         Path to a JSON file to read configuration from.
                           This can be specified multiple times.
  -config-dir=foo          Path to a directory to read configuration files
                           from. This will read every file ending in ".json"
                           as configuration in this directory in alphabetical
                           order.
  -data-dir=path           Path to a data directory to store agent state
  -dc=east-aws             Datacenter of the agent
  -encrypt=key             Provides the gossip encryption key
  -join=1.2.3.4            Address of an agent to join at start time.
                           Can be specified multiple times.
  -join-wan=1.2.3.4        Address of an agent to join -wan at start time.
                           Can be specified multiple times.
  -retry-join=1.2.3.4      Address of an agent to join at start time with
                           retries enabled. Can be specified multiple times.
  -retry-interval=30s      Time to wait between join attempts.
  -retry-max=0             Maximum number of join attempts. Defaults to 0, which
                           will retry indefinitely.
  -retry-wan-join=1.2.3.4  Address of an agent to join -wan at start time with
                           retries enabled. Can be specified multiple times.
  -retry-wan-interval=30s  Time to wait between join -wan attempts.
  -retry-wan-max=0         Maximum number of join -wan attempts. Defaults to 0, which
                           will retry indefinitely.
  -log-level=info          Log level of the agent.
  -node=hostname           Name of this node. Must be unique in the cluster
  -protocol=N              Sets the protocol version. Defaults to latest.
  -rejoin                  Ignores a previous leave and attempts to rejoin the cluster.
  -server                  Switches agent to server mode.
  -syslog                  Enables logging to syslog
  -ui-dir=path             Path to directory containing the Web UI resources
  -pid-file=path           Path to file to store agent PID
 `
	return strings.TrimSpace(helpText)
}
|