2013-12-20 01:14:46 +00:00
|
|
|
package agent
|
|
|
|
|
2013-12-20 23:33:13 +00:00
|
|
|
import (
|
2017-05-19 09:53:41 +00:00
|
|
|
"context"
|
2017-04-10 18:57:24 +00:00
|
|
|
"crypto/sha512"
|
2017-05-24 13:22:56 +00:00
|
|
|
"crypto/tls"
|
2014-11-24 08:36:03 +00:00
|
|
|
"encoding/json"
|
2013-12-20 23:33:13 +00:00
|
|
|
"fmt"
|
2013-12-21 00:39:32 +00:00
|
|
|
"io"
|
2015-06-04 21:33:30 +00:00
|
|
|
"io/ioutil"
|
2013-12-21 00:39:32 +00:00
|
|
|
"log"
|
2014-01-01 00:45:13 +00:00
|
|
|
"net"
|
2017-05-19 09:53:41 +00:00
|
|
|
"net/http"
|
2013-12-21 00:39:32 +00:00
|
|
|
"os"
|
2014-09-06 00:22:33 +00:00
|
|
|
"path/filepath"
|
2014-02-24 00:42:39 +00:00
|
|
|
"strconv"
|
2016-12-02 05:35:38 +00:00
|
|
|
"strings"
|
2013-12-21 00:39:32 +00:00
|
|
|
"sync"
|
2015-06-05 23:17:07 +00:00
|
|
|
"time"
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2018-10-03 19:37:53 +00:00
|
|
|
"google.golang.org/grpc"
|
|
|
|
|
2019-02-25 19:06:01 +00:00
|
|
|
metrics "github.com/armon/go-metrics"
|
2017-08-23 14:52:48 +00:00
|
|
|
"github.com/hashicorp/consul/acl"
|
2017-08-28 12:17:09 +00:00
|
|
|
"github.com/hashicorp/consul/agent/ae"
|
2018-04-11 08:52:51 +00:00
|
|
|
"github.com/hashicorp/consul/agent/cache"
|
2019-02-25 19:06:01 +00:00
|
|
|
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
2017-10-25 09:18:07 +00:00
|
|
|
"github.com/hashicorp/consul/agent/checks"
|
2017-09-25 18:40:42 +00:00
|
|
|
"github.com/hashicorp/consul/agent/config"
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
"github.com/hashicorp/consul/agent/consul"
|
2017-08-28 12:17:12 +00:00
|
|
|
"github.com/hashicorp/consul/agent/local"
|
2018-10-03 19:37:53 +00:00
|
|
|
"github.com/hashicorp/consul/agent/proxycfg"
|
2018-09-06 10:50:38 +00:00
|
|
|
"github.com/hashicorp/consul/agent/proxyprocess"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-06-21 04:43:55 +00:00
|
|
|
"github.com/hashicorp/consul/agent/systemd"
|
2017-07-26 18:03:43 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2018-10-03 19:37:53 +00:00
|
|
|
"github.com/hashicorp/consul/agent/xds"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2019-04-26 16:33:01 +00:00
|
|
|
"github.com/hashicorp/consul/api/watch"
|
2017-05-15 20:10:36 +00:00
|
|
|
"github.com/hashicorp/consul/ipaddr"
|
2016-01-29 19:42:34 +00:00
|
|
|
"github.com/hashicorp/consul/lib"
|
2018-05-03 20:56:42 +00:00
|
|
|
"github.com/hashicorp/consul/lib/file"
|
2016-11-16 21:45:26 +00:00
|
|
|
"github.com/hashicorp/consul/logger"
|
2019-02-26 15:52:07 +00:00
|
|
|
"github.com/hashicorp/consul/tlsutil"
|
2016-06-06 20:19:31 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2019-02-25 19:06:01 +00:00
|
|
|
multierror "github.com/hashicorp/go-multierror"
|
|
|
|
uuid "github.com/hashicorp/go-uuid"
|
2017-09-07 19:17:20 +00:00
|
|
|
"github.com/hashicorp/memberlist"
|
2017-02-24 04:32:13 +00:00
|
|
|
"github.com/hashicorp/raft"
|
2014-06-16 21:36:12 +00:00
|
|
|
"github.com/hashicorp/serf/serf"
|
2017-02-01 18:27:04 +00:00
|
|
|
"github.com/shirou/gopsutil/host"
|
2017-11-07 23:06:59 +00:00
|
|
|
"golang.org/x/net/http2"
|
2013-12-20 23:33:13 +00:00
|
|
|
)
|
|
|
|
|
2014-11-24 08:36:03 +00:00
|
|
|
const (
	// Path (relative to the data dir) where agent service definitions
	// are persisted so they survive agent restarts.
	servicesDir = "services"

	// Path (relative to the data dir) where managed proxy definitions
	// are persisted.
	proxyDir = "proxies"

	// Paths (relative to the data dir) for persisted local agent checks
	// and their saved check state (e.g. TTL check status).
	checksDir     = "checks"
	checkStateDir = "checks/state"

	// Name of the file tokens will be persisted within (relative to the
	// data dir).
	tokensPath = "acl-tokens.json"

	// Default reasons reported for node/service maintenance mode when the
	// operator did not supply one.
	defaultNodeMaintReason = "Maintenance mode is enabled for this node, " +
		"but no reason was provided. This is a default message."
	defaultServiceMaintReason = "Maintenance mode is enabled for this " +
		"service, but no reason was provided. This is a default message."

	// ID of the roots watch
	rootsWatchID = "roots"

	// ID of the leaf watch
	leafWatchID = "leaf"
)
|
|
|
|
|
2018-10-11 12:22:11 +00:00
|
|
|
// configSource marks where a configuration item originated: on this agent
// itself (local) or via a remote caller.
// NOTE(review): exact semantics inferred from the constant names — confirm
// against the call sites that pass these values.
type configSource int

const (
	// ConfigSourceLocal indicates the item came from the agent's own
	// configuration.
	ConfigSourceLocal configSource = iota
	// ConfigSourceRemote indicates the item was supplied remotely.
	ConfigSourceRemote
)
|
|
|
|
|
2017-06-15 09:42:07 +00:00
|
|
|
// delegate defines the interface shared by both
// consul.Client and consul.Server. The Agent uses it to talk to the
// underlying Consul RPC layer without caring which mode it runs in.
type delegate interface {
	// Encrypted reports whether the gossip layer is encrypted.
	Encrypted() bool
	GetLANCoordinate() (lib.CoordinateSet, error)
	// Leave gracefully leaves the cluster.
	Leave() error

	// LAN membership queries (backed by serf).
	LANMembers() []serf.Member
	LANMembersAllSegments() ([]serf.Member, error)
	LANSegmentMembers(segment string) ([]serf.Member, error)
	LocalMember() serf.Member
	// JoinLAN attempts to join the given addresses, returning how many
	// were successfully contacted.
	JoinLAN(addrs []string) (n int, err error)
	RemoveFailedNode(node string) error

	// ResolveToken resolves an ACL secret ID into an Authorizer.
	ResolveToken(secretID string) (acl.Authorizer, error)
	// RPC forwards a request to the Consul servers.
	RPC(method string, args interface{}, reply interface{}) error
	ACLsEnabled() bool
	UseLegacyACLs() bool
	// SnapshotRPC streams snapshot data to/from the servers.
	SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
	Shutdown() error
	Stats() map[string]map[string]string
	// ReloadConfig applies a new consul configuration at runtime.
	ReloadConfig(config *consul.Config) error
	enterpriseDelegate
}
|
2015-02-09 17:22:51 +00:00
|
|
|
|
2017-06-21 04:43:55 +00:00
|
|
|
// notifier is called after a successful JoinLAN. The default implementation
// wired up in New is &systemd.Notifier{}.
type notifier interface {
	// Notify delivers the given status string; a non-nil error means the
	// notification could not be delivered.
	Notify(string) error
}
|
|
|
|
|
2017-05-15 14:05:17 +00:00
|
|
|
// The agent is the long running process that is run on every machine.
// It exposes an RPC interface that is used by the CLI to control the
// agent. The agent runs the query interfaces like HTTP, DNS, and RPC.
// However, it can run in either a client, or server mode. In server
// mode, it runs a full Consul server. In client-only mode, it only forwards
// requests to other Consul servers.
type Agent struct {
	// config is the agent configuration.
	config *config.RuntimeConfig

	// Used for writing our logs
	logger *log.Logger

	// Output sink for logs
	LogOutput io.Writer

	// Used for streaming logs to
	LogWriter *logger.LogWriter

	// In-memory sink used for collecting metrics
	MemSink *metrics.InmemSink

	// delegate is either a *consul.Server or *consul.Client
	// depending on the configuration
	delegate delegate

	// aclMasterAuthorizer is an object that helps manage local ACL enforcement.
	aclMasterAuthorizer acl.Authorizer

	// state stores a local representation of the node,
	// services and checks. Used for anti-entropy.
	State *local.State

	// sync manages the synchronization of the local
	// and the remote state.
	sync *ae.StateSyncer

	// syncMu and syncCh are used to coordinate agent endpoints that are blocking
	// on local state during a config reload.
	syncMu sync.Mutex
	syncCh chan struct{}

	// cache is the in-memory cache for data the Agent requests.
	cache *cache.Cache

	// checkReapAfter maps the check ID to a timeout after which we should
	// reap its associated service
	checkReapAfter map[types.CheckID]time.Duration

	// checkMonitors maps the check ID to an associated monitor
	checkMonitors map[types.CheckID]*checks.CheckMonitor

	// checkHTTPs maps the check ID to an associated HTTP check
	checkHTTPs map[types.CheckID]*checks.CheckHTTP

	// checkTCPs maps the check ID to an associated TCP check
	checkTCPs map[types.CheckID]*checks.CheckTCP

	// checkGRPCs maps the check ID to an associated GRPC check
	checkGRPCs map[types.CheckID]*checks.CheckGRPC

	// checkTTLs maps the check ID to an associated check TTL
	checkTTLs map[types.CheckID]*checks.CheckTTL

	// checkDockers maps the check ID to an associated Docker Exec based check
	checkDockers map[types.CheckID]*checks.CheckDocker

	// checkAliases maps the check ID to an associated Alias checks
	checkAliases map[types.CheckID]*checks.CheckAlias

	// stateLock protects the agent state
	stateLock sync.Mutex

	// dockerClient is the client for performing docker health checks.
	dockerClient *checks.DockerClient

	// eventCh is used to receive user events
	eventCh chan serf.UserEvent

	// eventBuf stores the most recent events in a ring buffer
	// using eventIndex as the next index to insert into. This
	// is guarded by eventLock. When an insert happens, the
	// eventNotify group is notified.
	eventBuf    []*UserEvent
	eventIndex  int
	eventLock   sync.RWMutex
	eventNotify NotifyGroup

	// reloadCh carries configuration-reload requests; each request is a
	// channel on which the result of the reload is reported.
	reloadCh chan chan error

	// shutdown tracks whether the agent has been shut down. It is guarded
	// by shutdownLock; shutdownCh is closed to broadcast the shutdown.
	shutdown     bool
	shutdownCh   chan struct{}
	shutdownLock sync.Mutex

	// InterruptStartCh is used to interrupt a blocking agent start.
	// NOTE(review): presumably closed/signaled by the caller while Start
	// is blocking (it is passed to RequestAutoEncryptCerts) — confirm.
	InterruptStartCh chan struct{}

	// joinLANNotifier is called after a successful JoinLAN.
	joinLANNotifier notifier

	// retryJoinCh transports errors from the retry join
	// attempts.
	retryJoinCh chan error

	// endpoints maps unique RPC endpoint names to common ones
	// to allow overriding of RPC handlers since the golang
	// net/rpc server does not allow this.
	endpoints     map[string]string
	endpointsLock sync.RWMutex

	// dnsServers provides the DNS API
	dnsServers []*DNSServer

	// httpServers provides the HTTP API on various endpoints
	httpServers []*HTTPServer

	// wgServers is the wait group for all HTTP and DNS servers
	wgServers sync.WaitGroup

	// watchPlans tracks all the currently-running watch plans for the
	// agent.
	watchPlans []*watch.Plan

	// tokens holds ACL tokens initially from the configuration, but can
	// be updated at runtime, so should always be used instead of going to
	// the configuration directly.
	tokens *token.Store

	// proxyManager is the proxy process manager for managed Connect proxies.
	proxyManager *proxyprocess.Manager

	// proxyConfig is the manager for proxy service (Kind = connect-proxy)
	// configuration state. This ensures all state needed by a proxy registration
	// is maintained in cache and handles pushing updates to that state into XDS
	// server to be pushed out to Envoy. This is NOT related to managed proxies
	// directly.
	proxyConfig *proxycfg.Manager

	// serviceManager is the manager for combining local service registrations with
	// the centrally configured proxy/service defaults.
	serviceManager *ServiceManager

	// xdsServer is the Server instance that serves xDS gRPC API.
	xdsServer *xds.Server

	// grpcServer is the server instance used currently to serve xDS API for
	// Envoy.
	grpcServer *grpc.Server

	// tlsConfigurator is the central instance to provide a *tls.Config
	// based on the current consul configuration.
	tlsConfigurator *tlsutil.Configurator

	// persistedTokensLock is used to synchronize access to the persisted token
	// store within the data directory. This will prevent loading while writing as
	// well as multiple concurrent writes.
	persistedTokensLock sync.RWMutex
}
|
|
|
|
|
2019-06-27 20:22:07 +00:00
|
|
|
func New(c *config.RuntimeConfig, logger *log.Logger) (*Agent, error) {
|
2017-05-19 09:53:41 +00:00
|
|
|
if c.Datacenter == "" {
|
2013-12-24 00:20:51 +00:00
|
|
|
return nil, fmt.Errorf("Must configure a Datacenter")
|
|
|
|
}
|
2017-05-19 09:53:41 +00:00
|
|
|
if c.DataDir == "" && !c.DevMode {
|
2013-12-24 00:20:51 +00:00
|
|
|
return nil, fmt.Errorf("Must configure a DataDir")
|
|
|
|
}
|
|
|
|
|
2019-06-27 20:22:07 +00:00
|
|
|
a := Agent{
|
|
|
|
config: c,
|
|
|
|
checkReapAfter: make(map[types.CheckID]time.Duration),
|
|
|
|
checkMonitors: make(map[types.CheckID]*checks.CheckMonitor),
|
|
|
|
checkTTLs: make(map[types.CheckID]*checks.CheckTTL),
|
|
|
|
checkHTTPs: make(map[types.CheckID]*checks.CheckHTTP),
|
|
|
|
checkTCPs: make(map[types.CheckID]*checks.CheckTCP),
|
|
|
|
checkGRPCs: make(map[types.CheckID]*checks.CheckGRPC),
|
|
|
|
checkDockers: make(map[types.CheckID]*checks.CheckDocker),
|
|
|
|
checkAliases: make(map[types.CheckID]*checks.CheckAlias),
|
|
|
|
eventCh: make(chan serf.UserEvent, 1024),
|
|
|
|
eventBuf: make([]*UserEvent, 256),
|
|
|
|
joinLANNotifier: &systemd.Notifier{},
|
|
|
|
reloadCh: make(chan chan error),
|
|
|
|
retryJoinCh: make(chan error),
|
|
|
|
shutdownCh: make(chan struct{}),
|
|
|
|
InterruptStartCh: make(chan struct{}),
|
|
|
|
endpoints: make(map[string]string),
|
|
|
|
tokens: new(token.Store),
|
|
|
|
logger: logger,
|
|
|
|
}
|
|
|
|
a.serviceManager = NewServiceManager(&a)
|
2017-06-30 21:56:05 +00:00
|
|
|
|
2018-10-19 16:04:07 +00:00
|
|
|
if err := a.initializeACLs(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-06-27 20:22:07 +00:00
|
|
|
// Retrieve or generate the node ID before setting up the rest of the
|
|
|
|
// agent, which depends on it.
|
|
|
|
if err := a.setupNodeID(c); err != nil {
|
|
|
|
return nil, fmt.Errorf("Failed to setup node ID: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &a, nil
|
2017-05-19 09:53:41 +00:00
|
|
|
}
|
2016-12-02 05:35:38 +00:00
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
func LocalConfig(cfg *config.RuntimeConfig) local.Config {
|
|
|
|
lc := local.Config{
|
|
|
|
AdvertiseAddr: cfg.AdvertiseAddrLAN.String(),
|
|
|
|
CheckUpdateInterval: cfg.CheckUpdateInterval,
|
|
|
|
Datacenter: cfg.Datacenter,
|
|
|
|
DiscardCheckOutput: cfg.DiscardCheckOutput,
|
|
|
|
NodeID: cfg.NodeID,
|
|
|
|
NodeName: cfg.NodeName,
|
|
|
|
TaggedAddresses: map[string]string{},
|
2018-04-16 15:00:20 +00:00
|
|
|
ProxyBindMinPort: cfg.ConnectProxyBindMinPort,
|
|
|
|
ProxyBindMaxPort: cfg.ConnectProxyBindMaxPort,
|
2017-08-28 12:17:13 +00:00
|
|
|
}
|
|
|
|
for k, v := range cfg.TaggedAddresses {
|
|
|
|
lc.TaggedAddresses[k] = v
|
|
|
|
}
|
|
|
|
return lc
|
|
|
|
}
|
|
|
|
|
2018-09-13 14:06:04 +00:00
|
|
|
func (a *Agent) setupProxyManager() error {
|
|
|
|
acfg, err := a.config.APIConfig(true)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("[INFO] agent: Connect managed proxies are disabled due to providing an invalid HTTP configuration")
|
|
|
|
}
|
2018-09-06 10:50:38 +00:00
|
|
|
a.proxyManager = proxyprocess.NewManager()
|
2018-09-13 14:06:04 +00:00
|
|
|
a.proxyManager.AllowRoot = a.config.ConnectProxyAllowManagedRoot
|
|
|
|
a.proxyManager.State = a.State
|
|
|
|
a.proxyManager.Logger = a.logger
|
|
|
|
if a.config.DataDir != "" {
|
|
|
|
// DataDir is required for all non-dev mode agents, but we want
|
|
|
|
// to allow setting the data dir for demos and so on for the agent,
|
|
|
|
// so do the check above instead.
|
|
|
|
a.proxyManager.DataDir = filepath.Join(a.config.DataDir, "proxy")
|
|
|
|
|
|
|
|
// Restore from our snapshot (if it exists)
|
|
|
|
if err := a.proxyManager.Restore(a.proxyManager.SnapshotPath()); err != nil {
|
|
|
|
a.logger.Printf("[WARN] agent: error restoring proxy state: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
a.proxyManager.ProxyEnv = acfg.GenerateEnv()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-19 09:53:41 +00:00
|
|
|
// Start brings the agent to life: it creates the local state, the state
// syncer, the cache, and the consul client/server delegate, then starts
// every listener and background goroutine (DNS, HTTP(S), gRPC, event
// handling, coordinate sync, retry-join). The ordering below matters:
// the delegate must exist before the cache is registered and before the
// state syncer callbacks are wired, and the local state must be loaded
// before the proxy process manager starts.
func (a *Agent) Start() error {
	// Serialize with other state-mutating agent operations.
	a.stateLock.Lock()
	defer a.stateLock.Unlock()

	c := a.config

	// Warn if the node name is incompatible with DNS
	if InvalidDnsRe.MatchString(a.config.NodeName) {
		a.logger.Printf("[WARN] agent: Node name %q will not be discoverable "+
			"via DNS due to invalid characters. Valid characters include "+
			"all alpha-numerics and dashes.", a.config.NodeName)
	} else if len(a.config.NodeName) > MaxDNSLabelLength {
		a.logger.Printf("[WARN] agent: Node name %q will not be discoverable "+
			"via DNS due to it being too long. Valid lengths are between "+
			"1 and 63 bytes.", a.config.NodeName)
	}

	// load the tokens - this requires the logger to be setup
	// which is why we can't do this in New
	a.loadTokens(a.config)

	// create the local state
	a.State = local.NewState(LocalConfig(c), a.logger, a.tokens)

	// create the state synchronization manager which performs
	// regular and on-demand state synchronizations (anti-entropy).
	a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger)

	// create the cache
	a.cache = cache.New(nil)

	// create the config for the rpc server/client
	consulCfg, err := a.consulConfig()
	if err != nil {
		return err
	}

	// ServerUp is used to inform that a new consul server is now
	// up. This can be used to speed up the sync process if we are blocking
	// waiting to discover a consul server
	consulCfg.ServerUp = a.sync.SyncFull.Trigger

	// Build the central TLS configurator from the runtime config; it is
	// shared by the client/server delegate below.
	tlsConfigurator, err := tlsutil.NewConfigurator(c.ToTLSUtilConfig(), a.logger)
	if err != nil {
		return err
	}
	a.tlsConfigurator = tlsConfigurator

	// Setup either the client or the server.
	if c.ServerMode {
		server, err := consul.NewServerLogger(consulCfg, a.logger, a.tokens, a.tlsConfigurator)
		if err != nil {
			return fmt.Errorf("Failed to start Consul server: %v", err)
		}
		a.delegate = server
	} else {
		client, err := consul.NewClientLogger(consulCfg, a.logger, a.tlsConfigurator)
		if err != nil {
			return fmt.Errorf("Failed to start Consul client: %v", err)
		}
		a.delegate = client
	}

	// the staggering of the state syncing depends on the cluster size.
	a.sync.ClusterSize = func() int { return len(a.delegate.LANMembers()) }

	// link the state with the consul server/client and the state syncer
	// via callbacks. After several attempts this was easier than using
	// channels since the event notification needs to be non-blocking
	// and that should be hidden in the state syncer implementation.
	a.State.Delegate = a.delegate
	a.State.TriggerSyncChanges = a.sync.SyncChanges.Trigger

	// Register the cache. We do this much later so the delegate is
	// populated from above.
	a.registerCache()

	// AutoEncrypt only applies to clients: fetch certs from the servers,
	// prime the cache, and start watching for cert/root rotation.
	if a.config.AutoEncryptTLS && !a.config.ServerMode {
		reply, err := a.setupClientAutoEncrypt()
		if err != nil {
			return fmt.Errorf("AutoEncrypt failed: %s", err)
		}
		rootsReq, leafReq, err := a.setupClientAutoEncryptCache(reply)
		if err != nil {
			return fmt.Errorf("AutoEncrypt failed: %s", err)
		}
		if err = a.setupClientAutoEncryptWatching(rootsReq, leafReq); err != nil {
			return fmt.Errorf("AutoEncrypt failed: %s", err)
		}
		a.logger.Printf("[INFO] AutoEncrypt: upgraded to TLS")
	}

	// Load checks/services/metadata.
	if err := a.loadServices(c); err != nil {
		return err
	}
	if err := a.loadProxies(c); err != nil {
		return err
	}
	if err := a.loadChecks(c, nil); err != nil {
		return err
	}
	if err := a.loadMetadata(c); err != nil {
		return err
	}

	// create the proxy process manager and start it. This is purposely
	// done here after the local state above is loaded in so we can have
	// a more accurate initial state view.
	if !c.ConnectTestDisableManagedProxies {
		// A setup failure is logged rather than fatal: the agent can run
		// without managed proxies.
		if err := a.setupProxyManager(); err != nil {
			a.logger.Printf(err.Error())
		} else {
			go a.proxyManager.Run()
		}
	}

	// Start the proxy config manager.
	a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
		Cache:  a.cache,
		Logger: a.logger,
		State:  a.State,
		Source: &structs.QuerySource{
			// Agent also learns the primary DC if it is not configured.
			Node:       a.config.NodeName,
			Datacenter: a.config.Datacenter,
			Segment:    a.config.SegmentName,
		},
	})
	if err != nil {
		return err
	}
	go func() {
		if err := a.proxyConfig.Run(); err != nil {
			a.logger.Printf("[ERR] Proxy Config Manager exited: %s", err)
		}
	}()

	// Start watching for critical services to deregister, based on their
	// checks.
	go a.reapServices()

	// Start handling events.
	go a.handleEvents()

	// Start sending network coordinate to the server.
	if !c.DisableCoordinates {
		go a.sendCoordinate()
	}

	// Write out the PID file if necessary.
	if err := a.storePid(); err != nil {
		return err
	}

	// start DNS servers
	if err := a.listenAndServeDNS(); err != nil {
		return err
	}

	// Create listeners and unstarted servers; see comment on listenHTTP why
	// we are doing this.
	servers, err := a.listenHTTP()
	if err != nil {
		return err
	}

	// Start HTTP and HTTPS servers.
	for _, srv := range servers {
		if err := a.serveHTTP(srv); err != nil {
			return err
		}
		a.httpServers = append(a.httpServers, srv)
	}

	// Start gRPC server.
	if err := a.listenAndServeGRPC(); err != nil {
		return err
	}

	// register watches
	if err := a.reloadWatches(a.config); err != nil {
		return err
	}

	// start retry join
	go a.retryJoinLAN()
	go a.retryJoinWAN()

	return nil
}
|
|
|
|
|
2019-06-27 20:22:07 +00:00
|
|
|
func (a *Agent) setupClientAutoEncrypt() (*structs.SignedResponse, error) {
|
|
|
|
client := a.delegate.(*consul.Client)
|
|
|
|
|
|
|
|
addrs := a.config.StartJoinAddrsLAN
|
|
|
|
disco, err := newDiscover()
|
|
|
|
if err != nil && len(addrs) == 0 {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
addrs = append(addrs, retryJoinAddrs(disco, "LAN", a.config.RetryJoinLAN, a.logger)...)
|
|
|
|
|
|
|
|
reply, priv, err := client.RequestAutoEncryptCerts(addrs, a.config.ServerPort, a.tokens.AgentToken(), a.InterruptStartCh)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
connectCAPems := []string{}
|
|
|
|
for _, ca := range reply.ConnectCARoots.Roots {
|
|
|
|
connectCAPems = append(connectCAPems, ca.RootCert)
|
|
|
|
}
|
|
|
|
if err := a.tlsConfigurator.UpdateAutoEncrypt(reply.ManualCARoots, connectCAPems, reply.IssuedCert.CertPEM, priv, reply.VerifyServerHostname); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return reply, nil
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// setupClientAutoEncryptCache prepopulates the agent cache with the CA
// roots and leaf certificate obtained from the initial AutoEncrypt.Sign
// call, and returns the two request objects that the cache watches
// (installed by setupClientAutoEncryptWatching) will use.
func (a *Agent) setupClientAutoEncryptCache(reply *structs.SignedResponse) (*structs.DCSpecificRequest, *cachetype.ConnectCALeafRequest, error) {
	rootsReq := &structs.DCSpecificRequest{
		Datacenter:   a.config.Datacenter,
		QueryOptions: structs.QueryOptions{Token: a.tokens.AgentToken()},
	}

	// prepopulate roots cache
	rootRes := cache.FetchResult{Value: &reply.ConnectCARoots, Index: reply.ConnectCARoots.QueryMeta.Index}
	if err := a.cache.Prepopulate(cachetype.ConnectCARootName, rootRes, a.config.Datacenter, a.tokens.AgentToken(), rootsReq.CacheInfo().Key); err != nil {
		return nil, nil, err
	}

	leafReq := &cachetype.ConnectCALeafRequest{
		Datacenter: a.config.Datacenter,
		Token:      a.tokens.AgentToken(),
		Agent:      a.config.NodeName,
	}

	// prepopulate leaf cache
	// NOTE(review): the leaf entry is seeded with the roots' QueryMeta
	// index rather than an index from the issued cert itself — presumably
	// intentional since both came from the same Sign response; confirm.
	certRes := cache.FetchResult{Value: &reply.IssuedCert, Index: reply.ConnectCARoots.QueryMeta.Index}
	if err := a.cache.Prepopulate(cachetype.ConnectCALeafName, certRes, a.config.Datacenter, a.tokens.AgentToken(), leafReq.Key()); err != nil {
		return nil, nil, err
	}
	return rootsReq, leafReq, nil
}
|
|
|
|
|
|
|
|
func (a *Agent) setupClientAutoEncryptWatching(rootsReq *structs.DCSpecificRequest, leafReq *cachetype.ConnectCALeafRequest) error {
|
|
|
|
// setup watches
|
|
|
|
ch := make(chan cache.UpdateEvent, 10)
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
|
|
|
|
// Watch for root changes
|
|
|
|
err := a.cache.Notify(ctx, cachetype.ConnectCARootName, rootsReq, rootsWatchID, ch)
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Watch the leaf cert
|
|
|
|
err = a.cache.Notify(ctx, cachetype.ConnectCALeafName, leafReq, leafWatchID, ch)
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup actions in case the watches are firing.
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
cancel()
|
|
|
|
return
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case u := <-ch:
|
|
|
|
switch u.CorrelationID {
|
|
|
|
case rootsWatchID:
|
|
|
|
roots, ok := u.Result.(*structs.IndexedCARoots)
|
|
|
|
if !ok {
|
|
|
|
err := fmt.Errorf("invalid type for roots response: %T", u.Result)
|
|
|
|
a.logger.Printf("[ERR] %s watch error: %s", u.CorrelationID, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
pems := []string{}
|
|
|
|
for _, root := range roots.Roots {
|
|
|
|
pems = append(pems, root.RootCert)
|
|
|
|
}
|
|
|
|
a.tlsConfigurator.UpdateAutoEncryptCA(pems)
|
|
|
|
case leafWatchID:
|
|
|
|
leaf, ok := u.Result.(*structs.IssuedCert)
|
|
|
|
if !ok {
|
|
|
|
err := fmt.Errorf("invalid type for leaf response: %T", u.Result)
|
|
|
|
a.logger.Printf("[ERR] %s watch error: %s", u.CorrelationID, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
a.tlsConfigurator.UpdateAutoEncryptCert(leaf.CertPEM, leaf.PrivateKeyPEM)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Setup safety net in case the auto_encrypt cert doesn't get renewed
|
|
|
|
// in time. The agent would be stuck in that case because the watches
|
|
|
|
// never use the AutoEncrypt.Sign endpoint.
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
|
|
|
|
// Check 10sec after cert expires. The agent cache
|
|
|
|
// should be handling the expiration and renew before
|
|
|
|
// it.
|
|
|
|
// If there is no cert, AutoEncryptCertNotAfter returns
|
|
|
|
// a value in the past which immediately triggers the
|
|
|
|
// renew, but this case shouldn't happen because at
|
|
|
|
// this point, auto_encrypt was just being setup
|
|
|
|
// successfully.
|
|
|
|
interval := a.tlsConfigurator.AutoEncryptCertNotAfter().Sub(time.Now().Add(10 * time.Second))
|
|
|
|
a.logger.Printf("[DEBUG] AutoEncrypt: client certificate expiration check in %s", interval)
|
|
|
|
select {
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
return
|
|
|
|
case <-time.After(interval):
|
|
|
|
// check auto encrypt client cert expiration
|
|
|
|
if a.tlsConfigurator.AutoEncryptCertExpired() {
|
|
|
|
a.logger.Printf("[DEBUG] AutoEncrypt: client certificate expired.")
|
|
|
|
reply, err := a.setupClientAutoEncrypt()
|
|
|
|
if err != nil {
|
|
|
|
a.logger.Printf("[ERR] AutoEncrypt: client certificate expired, failed to renew: %s", err)
|
|
|
|
// in case of an error, try again in one minute
|
|
|
|
interval = time.Minute
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
_, _, err = a.setupClientAutoEncryptCache(reply)
|
|
|
|
if err != nil {
|
|
|
|
a.logger.Printf("[ERR] AutoEncrypt: client certificate expired, failed to populate cache: %s", err)
|
|
|
|
// in case of an error, try again in one minute
|
|
|
|
interval = time.Minute
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-10-03 19:37:53 +00:00
|
|
|
// listenAndServeGRPC starts the xDS gRPC server on every configured gRPC
// address. It is a no-op when no gRPC addresses are configured. Each
// listener is served in its own goroutine; serve errors are logged, not
// returned.
func (a *Agent) listenAndServeGRPC() error {
	if len(a.config.GRPCAddrs) < 1 {
		return nil
	}

	a.xdsServer = &xds.Server{
		Logger:       a.logger,
		CfgMgr:       a.proxyConfig,
		Authz:        a,
		ResolveToken: a.resolveToken,
	}
	a.xdsServer.Initialize()

	var err error
	if a.config.HTTPSPort > 0 {
		// gRPC uses the same TLS settings as the HTTPS API. If HTTPS is
		// enabled then gRPC will require HTTPS as well.
		a.grpcServer, err = a.xdsServer.GRPCServer(a.config.CertFile, a.config.KeyFile)
	} else {
		// No HTTPS port configured: serve gRPC in plaintext.
		a.grpcServer, err = a.xdsServer.GRPCServer("", "")
	}
	if err != nil {
		return err
	}

	ln, err := a.startListeners(a.config.GRPCAddrs)
	if err != nil {
		return err
	}

	// Serve each listener on its own goroutine. The listener is passed as
	// an argument to avoid loop-variable capture.
	for _, l := range ln {
		go func(innerL net.Listener) {
			a.logger.Printf("[INFO] agent: Started gRPC server on %s (%s)",
				innerL.Addr().String(), innerL.Addr().Network())
			err := a.grpcServer.Serve(innerL)
			if err != nil {
				a.logger.Printf("[ERR] gRPC server failed: %s", err)
			}
		}(l)
	}
	return nil
}
|
|
|
|
|
2017-05-24 13:22:56 +00:00
|
|
|
func (a *Agent) listenAndServeDNS() error {
|
2017-09-25 18:40:42 +00:00
|
|
|
notif := make(chan net.Addr, len(a.config.DNSAddrs))
|
2018-09-07 14:48:29 +00:00
|
|
|
errCh := make(chan error, len(a.config.DNSAddrs))
|
2017-09-25 18:40:42 +00:00
|
|
|
for _, addr := range a.config.DNSAddrs {
|
2017-05-24 13:22:56 +00:00
|
|
|
// create server
|
|
|
|
s, err := NewDNSServer(a)
|
2017-05-19 09:53:41 +00:00
|
|
|
if err != nil {
|
2017-05-24 13:22:56 +00:00
|
|
|
return err
|
2017-05-19 09:53:41 +00:00
|
|
|
}
|
2017-05-24 13:22:56 +00:00
|
|
|
a.dnsServers = append(a.dnsServers, s)
|
|
|
|
|
|
|
|
// start server
|
|
|
|
a.wgServers.Add(1)
|
2017-09-25 18:40:42 +00:00
|
|
|
go func(addr net.Addr) {
|
2017-05-24 13:22:56 +00:00
|
|
|
defer a.wgServers.Done()
|
2017-09-25 18:40:42 +00:00
|
|
|
err := s.ListenAndServe(addr.Network(), addr.String(), func() { notif <- addr })
|
2017-05-24 13:22:56 +00:00
|
|
|
if err != nil && !strings.Contains(err.Error(), "accept") {
|
2018-09-07 14:48:29 +00:00
|
|
|
errCh <- err
|
2017-05-24 13:22:56 +00:00
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
}(addr)
|
2017-05-19 09:53:41 +00:00
|
|
|
}
|
|
|
|
|
2017-05-24 13:22:56 +00:00
|
|
|
// wait for servers to be up
|
|
|
|
timeout := time.After(time.Second)
|
2018-09-07 14:48:29 +00:00
|
|
|
var merr *multierror.Error
|
2017-09-25 18:40:42 +00:00
|
|
|
for range a.config.DNSAddrs {
|
2017-05-24 13:22:56 +00:00
|
|
|
select {
|
2017-09-25 18:40:42 +00:00
|
|
|
case addr := <-notif:
|
|
|
|
a.logger.Printf("[INFO] agent: Started DNS server %s (%s)", addr.String(), addr.Network())
|
2019-02-27 19:28:31 +00:00
|
|
|
|
2018-09-07 14:48:29 +00:00
|
|
|
case err := <-errCh:
|
|
|
|
merr = multierror.Append(merr, err)
|
2017-05-24 13:22:56 +00:00
|
|
|
case <-timeout:
|
2018-09-07 14:48:29 +00:00
|
|
|
merr = multierror.Append(merr, fmt.Errorf("agent: timeout starting DNS servers"))
|
|
|
|
break
|
2017-05-24 13:22:56 +00:00
|
|
|
}
|
|
|
|
}
|
2018-09-07 14:48:29 +00:00
|
|
|
return merr.ErrorOrNil()
|
2017-05-19 09:53:41 +00:00
|
|
|
}
|
|
|
|
|
2018-10-03 19:37:53 +00:00
|
|
|
func (a *Agent) startListeners(addrs []net.Addr) ([]net.Listener, error) {
|
|
|
|
var ln []net.Listener
|
|
|
|
for _, addr := range addrs {
|
|
|
|
var l net.Listener
|
|
|
|
var err error
|
|
|
|
|
|
|
|
switch x := addr.(type) {
|
|
|
|
case *net.UnixAddr:
|
|
|
|
l, err = a.listenSocket(x.Name)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
case *net.TCPAddr:
|
|
|
|
l, err = net.Listen("tcp", x.String())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
l = &tcpKeepAliveListener{l.(*net.TCPListener)}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("unsupported address type %T", addr)
|
|
|
|
}
|
|
|
|
ln = append(ln, l)
|
|
|
|
}
|
|
|
|
return ln, nil
|
|
|
|
}
|
|
|
|
|
2017-05-24 13:22:56 +00:00
|
|
|
// listenHTTP binds listeners to the provided addresses and also returns
// pre-configured HTTP servers which are not yet started. The motivation is
// that in the current startup/shutdown setup we de-couple the listener
// creation from the server startup assuming that if any of the listeners
// cannot be bound we fail immediately and later failures do not occur.
// Therefore, starting a server with a running listener is assumed to not
// produce an error.
//
// The second motivation is that an HTTPS server needs to use the same TLSConfig
// on both the listener and the HTTP server. When listeners and servers are
// created at different times this becomes difficult to handle without keeping
// the TLS configuration somewhere or recreating it.
//
// This approach should ultimately be refactored to the point where we just
// start the server and any error should trigger a proper shutdown of the agent.
func (a *Agent) listenHTTP() ([]*HTTPServer, error) {
	// ln accumulates every bound listener so all of them can be closed if
	// a later bind fails; servers accumulates the unstarted HTTPServers.
	// Both are mutated by the start closure below.
	var ln []net.Listener
	var servers []*HTTPServer
	start := func(proto string, addrs []net.Addr) error {
		listeners, err := a.startListeners(addrs)
		if err != nil {
			return err
		}

		for _, l := range listeners {
			var tlscfg *tls.Config
			// Only TCP listeners are TLS-wrapped; unix sockets are
			// served in plaintext even for the "https" proto.
			_, isTCP := l.(*tcpKeepAliveListener)
			if isTCP && proto == "https" {
				tlscfg = a.tlsConfigurator.IncomingHTTPSConfig()
				l = tls.NewListener(l, tlscfg)
			}
			srv := &HTTPServer{
				Server: &http.Server{
					Addr:      l.Addr().String(),
					TLSConfig: tlscfg,
				},
				ln:        l,
				agent:     a,
				blacklist: NewBlacklist(a.config.HTTPBlockEndpoints),
				proto:     proto,
			}
			srv.Server.Handler = srv.handler(a.config.EnableDebug)

			// This will enable upgrading connections to HTTP/2 as
			// part of TLS negotiation.
			if proto == "https" {
				err = http2.ConfigureServer(srv.Server, nil)
				if err != nil {
					return err
				}
			}

			ln = append(ln, l)
			servers = append(servers, srv)
		}
		return nil
	}

	// Bind plain HTTP first, then HTTPS; on any failure close every
	// listener bound so far (including those from the earlier call).
	if err := start("http", a.config.HTTPAddrs); err != nil {
		for _, l := range ln {
			l.Close()
		}
		return nil, err
	}
	if err := start("https", a.config.HTTPSAddrs); err != nil {
		for _, l := range ln {
			l.Close()
		}
		return nil, err
	}
	return servers, nil
}
|
2017-05-19 09:53:41 +00:00
|
|
|
|
2017-05-30 23:05:21 +00:00
|
|
|
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
|
2017-11-07 23:06:59 +00:00
|
|
|
// connections. It's used so dead TCP connections eventually go away.
|
2017-05-30 23:05:21 +00:00
|
|
|
type tcpKeepAliveListener struct {
|
|
|
|
*net.TCPListener
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
|
|
|
|
tc, err := ln.AcceptTCP()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
tc.SetKeepAlive(true)
|
|
|
|
tc.SetKeepAlivePeriod(30 * time.Second)
|
|
|
|
return tc, nil
|
|
|
|
}
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
func (a *Agent) listenSocket(path string) (net.Listener, error) {
|
2017-05-24 13:22:56 +00:00
|
|
|
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
|
|
|
a.logger.Printf("[WARN] agent: Replacing socket %q", path)
|
|
|
|
}
|
|
|
|
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
|
|
|
|
return nil, fmt.Errorf("error removing socket file: %s", err)
|
|
|
|
}
|
|
|
|
l, err := net.Listen("unix", path)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
user, group, mode := a.config.UnixSocketUser, a.config.UnixSocketGroup, a.config.UnixSocketMode
|
|
|
|
if err := setFilePermissions(path, user, group, mode); err != nil {
|
|
|
|
return nil, fmt.Errorf("Failed setting up socket: %s", err)
|
2017-05-24 13:22:56 +00:00
|
|
|
}
|
|
|
|
return l, nil
|
|
|
|
}
|
|
|
|
|
2017-11-07 23:06:59 +00:00
|
|
|
// serveHTTP starts the given pre-configured HTTP server (built by
// listenHTTP) in a goroutine and blocks until the server goroutine has
// signalled that it is about to serve, or one second has elapsed.
func (a *Agent) serveHTTP(srv *HTTPServer) error {
	// https://github.com/golang/go/issues/20239
	//
	// In go.8.1 there is a race between Serve and Shutdown. If
	// Shutdown is called before the Serve go routine was scheduled then
	// the Serve go routine never returns. This deadlocks the agent
	// shutdown for some tests since it will wait forever.
	//
	// The unbuffered notif channel below is the workaround: the caller
	// does not return until the Serve goroutine has actually run.
	notif := make(chan net.Addr)
	a.wgServers.Add(1)
	go func() {
		defer a.wgServers.Done()
		notif <- srv.ln.Addr()
		err := srv.Serve(srv.ln)
		// ErrServerClosed is the expected result of a clean Shutdown.
		if err != nil && err != http.ErrServerClosed {
			a.logger.Print(err)
		}
	}()

	select {
	case addr := <-notif:
		if srv.proto == "https" {
			a.logger.Printf("[INFO] agent: Started HTTPS server on %s (%s)", addr.String(), addr.Network())
		} else {
			a.logger.Printf("[INFO] agent: Started HTTP server on %s (%s)", addr.String(), addr.Network())
		}
		return nil
	case <-time.After(time.Second):
		return fmt.Errorf("agent: timeout starting HTTP servers")
	}
}
|
|
|
|
|
2017-06-24 19:52:41 +00:00
|
|
|
// reloadWatches stops any existing watch plans and attempts to load the given
// set of watches. Each watch definition is validated (handler type, handler
// vs. args exclusivity, connect watch prohibition) before any plan is run;
// a validation failure aborts the reload and leaves no watches running.
func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
	// Stop the current watches.
	for _, wp := range a.watchPlans {
		wp.Stop()
	}
	a.watchPlans = nil

	// Return if there are no watches now.
	if len(cfg.Watches) == 0 {
		return nil
	}

	// Watches use the API to talk to this agent, so that must be enabled.
	if len(cfg.HTTPAddrs) == 0 && len(cfg.HTTPSAddrs) == 0 {
		return fmt.Errorf("watch plans require an HTTP or HTTPS endpoint")
	}

	// Compile the watches
	var watchPlans []*watch.Plan
	for _, params := range cfg.Watches {
		// Default missing handler_type to "script" (mutates the params
		// map in place); anything other than http/script is rejected.
		if handlerType, ok := params["handler_type"]; !ok {
			params["handler_type"] = "script"
		} else if handlerType != "http" && handlerType != "script" {
			return fmt.Errorf("Handler type '%s' not recognized", params["handler_type"])
		}

		// Don't let people use connect watches via this mechanism for now as it
		// needs thought about how to do securely and shouldn't be necessary. Note
		// that if the type assertion fails (type is not a string) then
		// ParseExempt below will error, so we don't need to handle that case.
		if typ, ok := params["type"].(string); ok {
			if strings.HasPrefix(typ, "connect_") {
				return fmt.Errorf("Watch type %s is not allowed in agent config", typ)
			}
		}

		// Parse the watches, excluding 'handler' and 'args'
		wp, err := watch.ParseExempt(params, []string{"handler", "args"})
		if err != nil {
			return fmt.Errorf("Failed to parse watch (%#v): %v", params, err)
		}

		// Get the handler and subprocess arguments
		handler, hasHandler := wp.Exempt["handler"]
		args, hasArgs := wp.Exempt["args"]
		if hasHandler {
			a.logger.Printf("[WARN] agent: The 'handler' field in watches has been deprecated " +
				"and replaced with the 'args' field. See https://www.consul.io/docs/agent/watches.html")
		}
		if _, ok := handler.(string); hasHandler && !ok {
			return fmt.Errorf("Watch handler must be a string")
		}
		// Normalize args from []interface{} to []string; the 'ok' from
		// the type assertion is also consulted in the else-if branch.
		if raw, ok := args.([]interface{}); hasArgs && ok {
			var parsed []string
			for _, arg := range raw {
				v, ok := arg.(string)
				if !ok {
					return fmt.Errorf("Watch args must be a list of strings")
				}

				parsed = append(parsed, v)
			}
			wp.Exempt["args"] = parsed
		} else if hasArgs && !ok {
			return fmt.Errorf("Watch args must be a list of strings")
		}
		// Exactly one handler mechanism may be in use: script handler,
		// args, or an http handler_type.
		if hasHandler && hasArgs || hasHandler && wp.HandlerType == "http" || hasArgs && wp.HandlerType == "http" {
			return fmt.Errorf("Only one watch handler allowed")
		}
		if !hasHandler && !hasArgs && wp.HandlerType != "http" {
			return fmt.Errorf("Must define a watch handler")
		}

		// Store the watch plan
		watchPlans = append(watchPlans, wp)
	}

	// Fire off a goroutine for each new watch plan.
	for _, wp := range watchPlans {
		// A per-plan API config failure skips that plan but does not
		// abort the reload.
		config, err := a.config.APIConfig(true)
		if err != nil {
			a.logger.Printf("[ERR] agent: Failed to run watch: %v", err)
			continue
		}

		a.watchPlans = append(a.watchPlans, wp)
		go func(wp *watch.Plan) {
			// Pick the handler implementation matching the validated
			// exclusivity rules above.
			if h, ok := wp.Exempt["handler"]; ok {
				wp.Handler = makeWatchHandler(a.LogOutput, h)
			} else if h, ok := wp.Exempt["args"]; ok {
				wp.Handler = makeWatchHandler(a.LogOutput, h)
			} else {
				httpConfig := wp.Exempt["http_handler_config"].(*watch.HttpHandlerConfig)
				wp.Handler = makeHTTPWatchHandler(a.LogOutput, httpConfig)
			}
			wp.LogOutput = a.LogOutput

			addr := config.Address
			if config.Scheme == "https" {
				addr = "https://" + addr
			}

			if err := wp.RunWithConfig(addr, config); err != nil {
				a.logger.Printf("[ERR] agent: Failed to run watch: %v", err)
			}
		}(wp)
	}
	return nil
}
|
|
|
|
|
2013-12-20 23:33:13 +00:00
|
|
|
// consulConfig is used to return a consul configuration
|
2017-05-03 21:47:25 +00:00
|
|
|
func (a *Agent) consulConfig() (*consul.Config, error) {
|
2013-12-20 23:33:13 +00:00
|
|
|
// Start with the provided config or default config
|
2017-05-03 19:12:30 +00:00
|
|
|
base := consul.DefaultConfig()
|
2017-06-30 09:09:52 +00:00
|
|
|
|
2017-01-18 06:20:11 +00:00
|
|
|
// This is set when the agent starts up
|
|
|
|
base.NodeID = a.config.NodeID
|
|
|
|
|
2015-11-29 04:40:05 +00:00
|
|
|
// Apply dev mode
|
|
|
|
base.DevMode = a.config.DevMode
|
|
|
|
|
2013-12-20 23:33:13 +00:00
|
|
|
// Override with our config
|
2017-09-25 18:40:42 +00:00
|
|
|
// todo(fs): these are now always set in the runtime config so we can simplify this
|
|
|
|
// todo(fs): or is there a reason to keep it like that?
|
|
|
|
base.Datacenter = a.config.Datacenter
|
2018-10-15 16:17:48 +00:00
|
|
|
base.PrimaryDatacenter = a.config.PrimaryDatacenter
|
2017-09-25 18:40:42 +00:00
|
|
|
base.DataDir = a.config.DataDir
|
|
|
|
base.NodeName = a.config.NodeName
|
|
|
|
|
|
|
|
base.CoordinateUpdateBatchSize = a.config.ConsulCoordinateUpdateBatchSize
|
|
|
|
base.CoordinateUpdateMaxBatches = a.config.ConsulCoordinateUpdateMaxBatches
|
|
|
|
base.CoordinateUpdatePeriod = a.config.ConsulCoordinateUpdatePeriod
|
2019-06-26 15:43:25 +00:00
|
|
|
base.CheckOutputMaxSize = a.config.CheckOutputMaxSize
|
2017-09-25 18:40:42 +00:00
|
|
|
|
|
|
|
base.RaftConfig.HeartbeatTimeout = a.config.ConsulRaftHeartbeatTimeout
|
|
|
|
base.RaftConfig.LeaderLeaseTimeout = a.config.ConsulRaftLeaderLeaseTimeout
|
|
|
|
base.RaftConfig.ElectionTimeout = a.config.ConsulRaftElectionTimeout
|
|
|
|
|
|
|
|
base.SerfLANConfig.MemberlistConfig.BindAddr = a.config.SerfBindAddrLAN.IP.String()
|
2017-09-29 14:12:04 +00:00
|
|
|
base.SerfLANConfig.MemberlistConfig.BindPort = a.config.SerfBindAddrLAN.Port
|
2017-09-25 18:40:42 +00:00
|
|
|
base.SerfLANConfig.MemberlistConfig.AdvertiseAddr = a.config.SerfAdvertiseAddrLAN.IP.String()
|
2017-09-29 14:12:04 +00:00
|
|
|
base.SerfLANConfig.MemberlistConfig.AdvertisePort = a.config.SerfAdvertiseAddrLAN.Port
|
2017-09-25 18:40:42 +00:00
|
|
|
base.SerfLANConfig.MemberlistConfig.GossipVerifyIncoming = a.config.EncryptVerifyIncoming
|
|
|
|
base.SerfLANConfig.MemberlistConfig.GossipVerifyOutgoing = a.config.EncryptVerifyOutgoing
|
2018-07-26 15:39:49 +00:00
|
|
|
base.SerfLANConfig.MemberlistConfig.GossipInterval = a.config.GossipLANGossipInterval
|
|
|
|
base.SerfLANConfig.MemberlistConfig.GossipNodes = a.config.GossipLANGossipNodes
|
|
|
|
base.SerfLANConfig.MemberlistConfig.ProbeInterval = a.config.GossipLANProbeInterval
|
|
|
|
base.SerfLANConfig.MemberlistConfig.ProbeTimeout = a.config.GossipLANProbeTimeout
|
|
|
|
base.SerfLANConfig.MemberlistConfig.SuspicionMult = a.config.GossipLANSuspicionMult
|
|
|
|
base.SerfLANConfig.MemberlistConfig.RetransmitMult = a.config.GossipLANRetransmitMult
|
2018-08-17 18:44:25 +00:00
|
|
|
if a.config.ReconnectTimeoutLAN != 0 {
|
|
|
|
base.SerfLANConfig.ReconnectTimeout = a.config.ReconnectTimeoutLAN
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
|
2018-03-26 19:21:06 +00:00
|
|
|
if a.config.SerfBindAddrWAN != nil {
|
|
|
|
base.SerfWANConfig.MemberlistConfig.BindAddr = a.config.SerfBindAddrWAN.IP.String()
|
|
|
|
base.SerfWANConfig.MemberlistConfig.BindPort = a.config.SerfBindAddrWAN.Port
|
|
|
|
base.SerfWANConfig.MemberlistConfig.AdvertiseAddr = a.config.SerfAdvertiseAddrWAN.IP.String()
|
|
|
|
base.SerfWANConfig.MemberlistConfig.AdvertisePort = a.config.SerfAdvertiseAddrWAN.Port
|
|
|
|
base.SerfWANConfig.MemberlistConfig.GossipVerifyIncoming = a.config.EncryptVerifyIncoming
|
|
|
|
base.SerfWANConfig.MemberlistConfig.GossipVerifyOutgoing = a.config.EncryptVerifyOutgoing
|
2018-07-26 15:39:49 +00:00
|
|
|
base.SerfWANConfig.MemberlistConfig.GossipInterval = a.config.GossipWANGossipInterval
|
|
|
|
base.SerfWANConfig.MemberlistConfig.GossipNodes = a.config.GossipWANGossipNodes
|
|
|
|
base.SerfWANConfig.MemberlistConfig.ProbeInterval = a.config.GossipWANProbeInterval
|
|
|
|
base.SerfWANConfig.MemberlistConfig.ProbeTimeout = a.config.GossipWANProbeTimeout
|
|
|
|
base.SerfWANConfig.MemberlistConfig.SuspicionMult = a.config.GossipWANSuspicionMult
|
|
|
|
base.SerfWANConfig.MemberlistConfig.RetransmitMult = a.config.GossipWANRetransmitMult
|
2018-08-17 18:44:25 +00:00
|
|
|
if a.config.ReconnectTimeoutWAN != 0 {
|
|
|
|
base.SerfWANConfig.ReconnectTimeout = a.config.ReconnectTimeoutWAN
|
|
|
|
}
|
2018-03-26 19:21:06 +00:00
|
|
|
} else {
|
|
|
|
// Disable serf WAN federation
|
|
|
|
base.SerfWANConfig = nil
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
|
|
|
|
base.RPCAddr = a.config.RPCBindAddr
|
|
|
|
base.RPCAdvertise = a.config.RPCAdvertiseAddr
|
|
|
|
|
|
|
|
base.Segment = a.config.SegmentName
|
2017-08-29 00:58:22 +00:00
|
|
|
if len(a.config.Segments) > 0 {
|
|
|
|
segments, err := a.segmentConfig()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-08-14 14:36:07 +00:00
|
|
|
}
|
2017-08-29 00:58:22 +00:00
|
|
|
base.Segments = segments
|
2017-08-14 14:36:07 +00:00
|
|
|
}
|
2013-12-25 00:48:07 +00:00
|
|
|
if a.config.Bootstrap {
|
|
|
|
base.Bootstrap = true
|
|
|
|
}
|
2019-06-26 15:43:25 +00:00
|
|
|
if a.config.CheckOutputMaxSize > 0 {
|
|
|
|
base.CheckOutputMaxSize = a.config.CheckOutputMaxSize
|
|
|
|
}
|
2014-06-18 17:32:19 +00:00
|
|
|
if a.config.RejoinAfterLeave {
|
|
|
|
base.RejoinAfterLeave = true
|
|
|
|
}
|
2014-06-20 00:08:48 +00:00
|
|
|
if a.config.BootstrapExpect != 0 {
|
|
|
|
base.BootstrapExpect = a.config.BootstrapExpect
|
2014-06-16 21:36:12 +00:00
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
if a.config.RPCProtocol > 0 {
|
|
|
|
base.ProtocolVersion = uint8(a.config.RPCProtocol)
|
2014-03-09 22:57:03 +00:00
|
|
|
}
|
2017-02-24 04:32:13 +00:00
|
|
|
if a.config.RaftProtocol != 0 {
|
|
|
|
base.RaftConfig.ProtocolVersion = raft.ProtocolVersion(a.config.RaftProtocol)
|
|
|
|
}
|
2018-05-10 15:16:38 +00:00
|
|
|
if a.config.RaftSnapshotThreshold != 0 {
|
|
|
|
base.RaftConfig.SnapshotThreshold = uint64(a.config.RaftSnapshotThreshold)
|
|
|
|
}
|
2018-05-10 22:06:47 +00:00
|
|
|
if a.config.RaftSnapshotInterval != 0 {
|
|
|
|
base.RaftConfig.SnapshotInterval = a.config.RaftSnapshotInterval
|
|
|
|
}
|
2019-07-23 14:19:57 +00:00
|
|
|
if a.config.RaftTrailingLogs != 0 {
|
|
|
|
base.RaftConfig.TrailingLogs = uint64(a.config.RaftTrailingLogs)
|
|
|
|
}
|
2014-08-05 22:36:08 +00:00
|
|
|
if a.config.ACLMasterToken != "" {
|
|
|
|
base.ACLMasterToken = a.config.ACLMasterToken
|
|
|
|
}
|
2014-08-05 22:20:35 +00:00
|
|
|
if a.config.ACLDatacenter != "" {
|
|
|
|
base.ACLDatacenter = a.config.ACLDatacenter
|
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
if a.config.ACLTokenTTL != 0 {
|
|
|
|
base.ACLTokenTTL = a.config.ACLTokenTTL
|
|
|
|
}
|
|
|
|
if a.config.ACLPolicyTTL != 0 {
|
|
|
|
base.ACLPolicyTTL = a.config.ACLPolicyTTL
|
2014-08-05 22:20:35 +00:00
|
|
|
}
|
2019-04-15 20:43:19 +00:00
|
|
|
if a.config.ACLRoleTTL != 0 {
|
|
|
|
base.ACLRoleTTL = a.config.ACLRoleTTL
|
|
|
|
}
|
2014-08-05 22:20:35 +00:00
|
|
|
if a.config.ACLDefaultPolicy != "" {
|
|
|
|
base.ACLDefaultPolicy = a.config.ACLDefaultPolicy
|
|
|
|
}
|
|
|
|
if a.config.ACLDownPolicy != "" {
|
|
|
|
base.ACLDownPolicy = a.config.ACLDownPolicy
|
|
|
|
}
|
2018-10-19 16:04:07 +00:00
|
|
|
base.ACLEnforceVersion8 = a.config.ACLEnforceVersion8
|
|
|
|
base.ACLTokenReplication = a.config.ACLTokenReplication
|
|
|
|
base.ACLsEnabled = a.config.ACLsEnabled
|
2017-10-02 22:10:21 +00:00
|
|
|
if a.config.ACLEnableKeyListPolicy {
|
|
|
|
base.ACLEnableKeyListPolicy = a.config.ACLEnableKeyListPolicy
|
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
if a.config.SessionTTLMin != 0 {
|
2015-03-27 05:30:04 +00:00
|
|
|
base.SessionTTLMin = a.config.SessionTTLMin
|
|
|
|
}
|
2017-03-21 23:36:44 +00:00
|
|
|
if a.config.NonVotingServer {
|
|
|
|
base.NonVoter = a.config.NonVotingServer
|
|
|
|
}
|
2017-12-13 18:31:45 +00:00
|
|
|
|
|
|
|
// These are fully specified in the agent defaults, so we can simply
|
|
|
|
// copy them over.
|
|
|
|
base.AutopilotConfig.CleanupDeadServers = a.config.AutopilotCleanupDeadServers
|
|
|
|
base.AutopilotConfig.LastContactThreshold = a.config.AutopilotLastContactThreshold
|
|
|
|
base.AutopilotConfig.MaxTrailingLogs = uint64(a.config.AutopilotMaxTrailingLogs)
|
|
|
|
base.AutopilotConfig.ServerStabilizationTime = a.config.AutopilotServerStabilizationTime
|
|
|
|
base.AutopilotConfig.RedundancyZoneTag = a.config.AutopilotRedundancyZoneTag
|
|
|
|
base.AutopilotConfig.DisableUpgradeMigration = a.config.AutopilotDisableUpgradeMigration
|
|
|
|
base.AutopilotConfig.UpgradeVersionTag = a.config.AutopilotUpgradeVersionTag
|
2013-12-20 23:33:13 +00:00
|
|
|
|
2017-05-03 20:59:06 +00:00
|
|
|
// make sure the advertise address is always set
|
|
|
|
if base.RPCAdvertise == nil {
|
|
|
|
base.RPCAdvertise = base.RPCAddr
|
|
|
|
}
|
|
|
|
|
2017-09-01 22:02:50 +00:00
|
|
|
// Rate limiting for RPC calls.
|
2017-09-25 18:40:42 +00:00
|
|
|
if a.config.RPCRateLimit > 0 {
|
|
|
|
base.RPCRate = a.config.RPCRateLimit
|
2017-09-01 22:02:50 +00:00
|
|
|
}
|
2017-09-25 18:40:42 +00:00
|
|
|
if a.config.RPCMaxBurst > 0 {
|
|
|
|
base.RPCMaxBurst = a.config.RPCMaxBurst
|
2017-09-01 22:02:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-10 22:19:50 +00:00
|
|
|
// RPC-related performance configs.
|
|
|
|
if a.config.RPCHoldTimeout > 0 {
|
|
|
|
base.RPCHoldTimeout = a.config.RPCHoldTimeout
|
|
|
|
}
|
|
|
|
if a.config.LeaveDrainTime > 0 {
|
|
|
|
base.LeaveDrainTime = a.config.LeaveDrainTime
|
|
|
|
}
|
|
|
|
|
2017-05-03 10:57:11 +00:00
|
|
|
// set the src address for outgoing rpc connections
|
2017-05-10 07:30:19 +00:00
|
|
|
// Use port 0 so that outgoing connections use a random port.
|
2017-05-15 20:10:36 +00:00
|
|
|
if !ipaddr.IsAny(base.RPCAddr.IP) {
|
2017-05-10 07:30:19 +00:00
|
|
|
base.RPCSrcAddr = &net.TCPAddr{IP: base.RPCAddr.IP}
|
|
|
|
}
|
2017-05-03 10:57:11 +00:00
|
|
|
|
2014-06-06 22:36:40 +00:00
|
|
|
// Format the build string
|
|
|
|
revision := a.config.Revision
|
|
|
|
if len(revision) > 8 {
|
|
|
|
revision = revision[:8]
|
|
|
|
}
|
2017-05-03 19:12:30 +00:00
|
|
|
base.Build = fmt.Sprintf("%s%s:%s", a.config.Version, a.config.VersionPrerelease, revision)
|
2014-06-06 22:36:40 +00:00
|
|
|
|
2014-04-04 23:52:39 +00:00
|
|
|
// Copy the TLS configuration
|
2017-04-28 23:15:55 +00:00
|
|
|
base.VerifyIncoming = a.config.VerifyIncoming || a.config.VerifyIncomingRPC
|
2017-05-10 21:25:48 +00:00
|
|
|
if a.config.CAPath != "" || a.config.CAFile != "" {
|
|
|
|
base.UseTLS = true
|
|
|
|
}
|
2014-04-04 23:52:39 +00:00
|
|
|
base.VerifyOutgoing = a.config.VerifyOutgoing
|
2015-05-11 22:16:13 +00:00
|
|
|
base.VerifyServerHostname = a.config.VerifyServerHostname
|
2014-04-04 23:52:39 +00:00
|
|
|
base.CAFile = a.config.CAFile
|
2017-04-27 08:29:39 +00:00
|
|
|
base.CAPath = a.config.CAPath
|
2014-04-04 23:52:39 +00:00
|
|
|
base.CertFile = a.config.CertFile
|
|
|
|
base.KeyFile = a.config.KeyFile
|
2014-06-13 18:27:44 +00:00
|
|
|
base.ServerName = a.config.ServerName
|
2017-09-25 18:40:42 +00:00
|
|
|
base.Domain = a.config.DNSDomain
|
2017-02-01 20:52:04 +00:00
|
|
|
base.TLSMinVersion = a.config.TLSMinVersion
|
2017-04-27 08:29:39 +00:00
|
|
|
base.TLSCipherSuites = a.config.TLSCipherSuites
|
|
|
|
base.TLSPreferServerCipherSuites = a.config.TLSPreferServerCipherSuites
|
2014-04-04 23:52:39 +00:00
|
|
|
|
2019-06-27 20:22:07 +00:00
|
|
|
base.AutoEncryptAllowTLS = a.config.AutoEncryptAllowTLS
|
|
|
|
|
2018-04-25 18:34:08 +00:00
|
|
|
// Copy the Connect CA bootstrap config
|
|
|
|
if a.config.ConnectEnabled {
|
|
|
|
base.ConnectEnabled = true
|
|
|
|
|
2018-05-10 16:04:33 +00:00
|
|
|
// Allow config to specify cluster_id provided it's a valid UUID. This is
|
|
|
|
// meant only for tests where a deterministic ID makes fixtures much simpler
|
|
|
|
// to work with but since it's only read on initial cluster bootstrap it's not
|
|
|
|
// that much of a liability in production. The worst a user could do is
|
|
|
|
// configure logically separate clusters with same ID by mistake but we can
|
|
|
|
// avoid documenting this is even an option.
|
|
|
|
if clusterID, ok := a.config.ConnectCAConfig["cluster_id"]; ok {
|
|
|
|
if cIDStr, ok := clusterID.(string); ok {
|
|
|
|
if _, err := uuid.ParseUUID(cIDStr); err == nil {
|
|
|
|
// Valid UUID configured, use that
|
|
|
|
base.CAConfig.ClusterID = cIDStr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if base.CAConfig.ClusterID == "" {
|
2018-05-22 14:11:13 +00:00
|
|
|
// If the tried to specify an ID but typoed it don't ignore as they will
|
|
|
|
// then bootstrap with a new ID and have to throw away the whole cluster
|
|
|
|
// and start again.
|
|
|
|
a.logger.Println("[ERR] connect CA config cluster_id specified but " +
|
|
|
|
"is not a valid UUID, aborting startup")
|
|
|
|
return nil, fmt.Errorf("cluster_id was supplied but was not a valid UUID")
|
2018-05-10 16:04:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-25 18:34:08 +00:00
|
|
|
if a.config.ConnectCAProvider != "" {
|
|
|
|
base.CAConfig.Provider = a.config.ConnectCAProvider
|
2019-01-22 17:19:36 +00:00
|
|
|
}
|
2018-04-25 18:34:08 +00:00
|
|
|
|
2019-01-22 17:19:36 +00:00
|
|
|
// Merge connect CA Config regardless of provider (since there are some
|
|
|
|
// common config options valid to all like leaf TTL).
|
|
|
|
for k, v := range a.config.ConnectCAConfig {
|
|
|
|
base.CAConfig.Config[k] = v
|
2018-04-25 18:34:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-27 23:49:12 +00:00
|
|
|
// Setup the user event callback
|
|
|
|
base.UserEventHandler = func(e serf.UserEvent) {
|
|
|
|
select {
|
|
|
|
case a.eventCh <- e:
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-21 00:39:32 +00:00
|
|
|
// Setup the loggers
|
2019-06-19 12:50:48 +00:00
|
|
|
base.LogLevel = a.config.LogLevel
|
2017-05-19 15:51:39 +00:00
|
|
|
base.LogOutput = a.LogOutput
|
2017-06-29 12:35:55 +00:00
|
|
|
|
2017-09-07 19:17:20 +00:00
|
|
|
// This will set up the LAN keyring, as well as the WAN and any segments
|
|
|
|
// for servers.
|
2017-07-17 19:48:45 +00:00
|
|
|
if err := a.setupKeyrings(base); err != nil {
|
|
|
|
return nil, fmt.Errorf("Failed to configure keyring: %v", err)
|
2017-06-29 12:35:55 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 18:25:03 +00:00
|
|
|
base.ConfigEntryBootstrap = a.config.ConfigEntryBootstrap
|
|
|
|
|
2017-05-03 21:47:25 +00:00
|
|
|
return base, nil
|
2013-12-20 23:33:13 +00:00
|
|
|
}
|
|
|
|
|
2017-08-29 00:58:22 +00:00
|
|
|
// Setup the serf and memberlist config for any defined network segments.
// Each configured segment gets its own serf/memberlist configuration derived
// from the LAN defaults, with the segment's own bind/advertise addresses.
func (a *Agent) segmentConfig() ([]consul.NetworkSegment, error) {
	var segments []consul.NetworkSegment
	config := a.config

	for _, s := range config.Segments {
		// Start from the default LAN serf configuration for every segment.
		serfConf := consul.DefaultConfig().SerfLANConfig

		serfConf.MemberlistConfig.BindAddr = s.Bind.IP.String()
		serfConf.MemberlistConfig.BindPort = s.Bind.Port
		serfConf.MemberlistConfig.AdvertiseAddr = s.Advertise.IP.String()
		serfConf.MemberlistConfig.AdvertisePort = s.Advertise.Port

		// Segment-level gossip settings inherit from the agent's LAN
		// settings; only override when explicitly configured.
		if config.ReconnectTimeoutLAN != 0 {
			serfConf.ReconnectTimeout = config.ReconnectTimeoutLAN
		}
		if config.EncryptVerifyIncoming {
			serfConf.MemberlistConfig.GossipVerifyIncoming = config.EncryptVerifyIncoming
		}
		if config.EncryptVerifyOutgoing {
			serfConf.MemberlistConfig.GossipVerifyOutgoing = config.EncryptVerifyOutgoing
		}

		// Only segments that opt in get a dedicated RPC listener; it is
		// bound on the segment's bind IP at the agent's server port.
		var rpcAddr *net.TCPAddr
		if s.RPCListener {
			rpcAddr = &net.TCPAddr{
				IP:   s.Bind.IP,
				Port: a.config.ServerPort,
			}
		}

		segments = append(segments, consul.NetworkSegment{
			Name:       s.Name,
			Bind:       serfConf.MemberlistConfig.BindAddr,
			Advertise:  serfConf.MemberlistConfig.AdvertiseAddr,
			Port:       s.Bind.Port,
			RPCAddr:    rpcAddr,
			SerfConfig: serfConf,
		})
	}

	return segments, nil
}
|
|
|
|
|
2017-02-01 18:27:04 +00:00
|
|
|
// makeRandomID will generate a random UUID for a node.
|
|
|
|
func (a *Agent) makeRandomID() (string, error) {
|
|
|
|
id, err := uuid.GenerateUUID()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2018-03-21 15:56:14 +00:00
|
|
|
a.logger.Printf("[DEBUG] agent: Using random ID %q as node ID", id)
|
2017-02-01 18:27:04 +00:00
|
|
|
return id, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// makeNodeID will try to find a host-specific ID, or else will generate a
|
|
|
|
// random ID. The returned ID will always be formatted as a GUID. We don't tell
|
|
|
|
// the caller whether this ID is random or stable since the consequences are
|
|
|
|
// high for us if this changes, so we will persist it either way. This will let
|
|
|
|
// gopsutil change implementations without affecting in-place upgrades of nodes.
|
|
|
|
func (a *Agent) makeNodeID() (string, error) {
|
2017-04-13 05:05:38 +00:00
|
|
|
// If they've disabled host-based IDs then just make a random one.
|
2017-09-25 18:40:42 +00:00
|
|
|
if a.config.DisableHostNodeID {
|
2017-04-13 05:05:38 +00:00
|
|
|
return a.makeRandomID()
|
|
|
|
}
|
|
|
|
|
2017-02-01 18:27:04 +00:00
|
|
|
// Try to get a stable ID associated with the host itself.
|
|
|
|
info, err := host.Info()
|
|
|
|
if err != nil {
|
2018-03-21 15:56:14 +00:00
|
|
|
a.logger.Printf("[DEBUG] agent: Couldn't get a unique ID from the host: %v", err)
|
2017-02-01 18:27:04 +00:00
|
|
|
return a.makeRandomID()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure the host ID parses as a UUID, since we don't have complete
|
|
|
|
// control over this process.
|
|
|
|
id := strings.ToLower(info.HostID)
|
|
|
|
if _, err := uuid.ParseUUID(id); err != nil {
|
2018-03-21 15:56:14 +00:00
|
|
|
a.logger.Printf("[DEBUG] agent: Unique ID %q from host isn't formatted as a UUID: %v",
|
2017-02-01 18:27:04 +00:00
|
|
|
id, err)
|
|
|
|
return a.makeRandomID()
|
|
|
|
}
|
|
|
|
|
2017-04-10 18:57:24 +00:00
|
|
|
// Hash the input to make it well distributed. The reported Host UUID may be
|
|
|
|
// similar across nodes if they are on a cloud provider or on motherboards
|
|
|
|
// created from the same batch.
|
|
|
|
buf := sha512.Sum512([]byte(id))
|
|
|
|
id = fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
|
|
|
|
buf[0:4],
|
|
|
|
buf[4:6],
|
|
|
|
buf[6:8],
|
|
|
|
buf[8:10],
|
|
|
|
buf[10:16])
|
|
|
|
|
2018-03-21 15:56:14 +00:00
|
|
|
a.logger.Printf("[DEBUG] agent: Using unique ID %q from host as node ID", id)
|
2017-02-01 18:27:04 +00:00
|
|
|
return id, nil
|
|
|
|
}
|
|
|
|
|
2017-02-01 03:13:49 +00:00
|
|
|
// setupNodeID will pull the persisted node ID, if any, or create a random one
|
2017-01-18 06:20:11 +00:00
|
|
|
// and persist it.
|
2017-09-25 18:40:42 +00:00
|
|
|
func (a *Agent) setupNodeID(config *config.RuntimeConfig) error {
|
2017-01-18 06:20:11 +00:00
|
|
|
// If they've configured a node ID manually then just use that, as
|
|
|
|
// long as it's valid.
|
|
|
|
if config.NodeID != "" {
|
2017-03-14 02:51:56 +00:00
|
|
|
config.NodeID = types.NodeID(strings.ToLower(string(config.NodeID)))
|
2017-01-18 06:20:11 +00:00
|
|
|
if _, err := uuid.ParseUUID(string(config.NodeID)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-02-01 18:27:04 +00:00
|
|
|
// For dev mode we have no filesystem access so just make one.
|
2018-06-06 20:04:19 +00:00
|
|
|
if a.config.DataDir == "" {
|
2017-02-01 18:27:04 +00:00
|
|
|
id, err := a.makeNodeID()
|
2017-01-18 06:20:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
config.NodeID = types.NodeID(id)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load saved state, if any. Since a user could edit this, we also
|
|
|
|
// validate it.
|
|
|
|
fileID := filepath.Join(config.DataDir, "node-id")
|
|
|
|
if _, err := os.Stat(fileID); err == nil {
|
|
|
|
rawID, err := ioutil.ReadFile(fileID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeID := strings.TrimSpace(string(rawID))
|
2017-03-14 02:51:56 +00:00
|
|
|
nodeID = strings.ToLower(nodeID)
|
2017-01-18 06:20:11 +00:00
|
|
|
if _, err := uuid.ParseUUID(nodeID); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
config.NodeID = types.NodeID(nodeID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we still don't have a valid node ID, make one.
|
|
|
|
if config.NodeID == "" {
|
2017-02-01 18:27:04 +00:00
|
|
|
id, err := a.makeNodeID()
|
2017-01-18 06:20:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := lib.EnsurePath(fileID, false); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := ioutil.WriteFile(fileID, []byte(id), 0600); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
config.NodeID = types.NodeID(id)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-09-07 19:17:20 +00:00
|
|
|
// setupBaseKeyrings configures the LAN and WAN keyrings.
|
|
|
|
func (a *Agent) setupBaseKeyrings(config *consul.Config) error {
|
2017-07-17 19:48:45 +00:00
|
|
|
// If the keyring file is disabled then just poke the provided key
|
|
|
|
// into the in-memory keyring.
|
2018-03-27 19:28:05 +00:00
|
|
|
federationEnabled := config.SerfWANConfig != nil
|
2017-07-17 19:48:45 +00:00
|
|
|
if a.config.DisableKeyringFile {
|
|
|
|
if a.config.EncryptKey == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
keys := []string{a.config.EncryptKey}
|
|
|
|
if err := loadKeyring(config.SerfLANConfig, keys); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-03-27 19:28:05 +00:00
|
|
|
if a.config.ServerMode && federationEnabled {
|
2017-07-17 19:48:45 +00:00
|
|
|
if err := loadKeyring(config.SerfWANConfig, keys); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we need to deal with the keyring files.
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
fileLAN := filepath.Join(a.config.DataDir, SerfLANKeyring)
|
|
|
|
fileWAN := filepath.Join(a.config.DataDir, SerfWANKeyring)
|
2014-10-10 18:13:30 +00:00
|
|
|
|
|
|
|
if a.config.EncryptKey == "" {
|
|
|
|
goto LOAD
|
|
|
|
}
|
|
|
|
if _, err := os.Stat(fileLAN); err != nil {
|
|
|
|
if err := initKeyring(fileLAN, a.config.EncryptKey); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2018-03-27 19:28:05 +00:00
|
|
|
if a.config.ServerMode && federationEnabled {
|
2014-10-10 18:13:30 +00:00
|
|
|
if _, err := os.Stat(fileWAN); err != nil {
|
|
|
|
if err := initKeyring(fileWAN, a.config.EncryptKey); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
LOAD:
|
|
|
|
if _, err := os.Stat(fileLAN); err == nil {
|
|
|
|
config.SerfLANConfig.KeyringFile = fileLAN
|
|
|
|
}
|
|
|
|
if err := loadKeyringFile(config.SerfLANConfig); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-03-27 19:28:05 +00:00
|
|
|
if a.config.ServerMode && federationEnabled {
|
2014-10-10 18:13:30 +00:00
|
|
|
if _, err := os.Stat(fileWAN); err == nil {
|
|
|
|
config.SerfWANConfig.KeyringFile = fileWAN
|
|
|
|
}
|
|
|
|
if err := loadKeyringFile(config.SerfWANConfig); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-09-07 19:17:20 +00:00
|
|
|
// setupKeyrings is used to initialize and load keyrings during agent startup.
|
|
|
|
func (a *Agent) setupKeyrings(config *consul.Config) error {
|
|
|
|
// First set up the LAN and WAN keyrings.
|
|
|
|
if err := a.setupBaseKeyrings(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there's no LAN keyring then there's nothing else to set up for
|
|
|
|
// any segments.
|
|
|
|
lanKeyring := config.SerfLANConfig.MemberlistConfig.Keyring
|
|
|
|
if lanKeyring == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy the initial state of the LAN keyring into each segment config.
|
|
|
|
// Segments don't have their own keyring file, they rely on the LAN
|
|
|
|
// holding the state so things can't get out of sync.
|
|
|
|
k, pk := lanKeyring.GetKeys(), lanKeyring.GetPrimaryKey()
|
|
|
|
for _, segment := range config.Segments {
|
|
|
|
keyring, err := memberlist.NewKeyring(k, pk)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
segment.SerfConfig.MemberlistConfig.Keyring = keyring
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-06-19 14:36:09 +00:00
|
|
|
// registerEndpoint registers a handler for the consul RPC server
|
2017-06-16 07:54:09 +00:00
|
|
|
// under a unique name while making it accessible under the provided
|
|
|
|
// name. This allows overwriting handlers for the golang net/rpc
|
|
|
|
// service which does not allow this.
|
2017-06-19 14:36:09 +00:00
|
|
|
func (a *Agent) registerEndpoint(name string, handler interface{}) error {
|
2017-06-16 07:54:09 +00:00
|
|
|
srv, ok := a.delegate.(*consul.Server)
|
|
|
|
if !ok {
|
|
|
|
panic("agent must be a server")
|
|
|
|
}
|
|
|
|
realname := fmt.Sprintf("%s-%d", name, time.Now().UnixNano())
|
|
|
|
a.endpointsLock.Lock()
|
|
|
|
a.endpoints[name] = realname
|
|
|
|
a.endpointsLock.Unlock()
|
|
|
|
return srv.RegisterEndpoint(realname, handler)
|
|
|
|
}
|
|
|
|
|
2013-12-20 23:33:13 +00:00
|
|
|
// RPC is used to make an RPC call to the Consul servers
|
|
|
|
// This allows the agent to implement the Consul.Interface
|
|
|
|
func (a *Agent) RPC(method string, args interface{}, reply interface{}) error {
|
2017-08-10 01:51:55 +00:00
|
|
|
a.endpointsLock.RLock()
|
2017-06-16 07:54:09 +00:00
|
|
|
// fast path: only translate if there are overrides
|
|
|
|
if len(a.endpoints) > 0 {
|
|
|
|
p := strings.SplitN(method, ".", 2)
|
|
|
|
if e := a.endpoints[p[0]]; e != "" {
|
|
|
|
method = e + "." + p[1]
|
|
|
|
}
|
|
|
|
}
|
2017-08-10 01:51:55 +00:00
|
|
|
a.endpointsLock.RUnlock()
|
2017-05-15 14:05:17 +00:00
|
|
|
return a.delegate.RPC(method, args, reply)
|
2013-12-20 23:33:13 +00:00
|
|
|
}
|
|
|
|
|
2016-10-26 02:20:24 +00:00
|
|
|
// SnapshotRPC performs the requested snapshot RPC against the Consul server in
// a streaming manner. The contents of in will be read and passed along as the
// payload, and the response message will determine the error status, and any
// return payload will be written to out.
func (a *Agent) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer,
	replyFn structs.SnapshotReplyFn) error {
	// Delegate directly; the client/server delegate handles the streaming.
	return a.delegate.SnapshotRPC(args, in, out, replyFn)
}
|
|
|
|
|
2014-04-18 05:46:31 +00:00
|
|
|
// Leave is used to prepare the agent for a graceful shutdown
// by delegating the leave to the underlying client or server.
func (a *Agent) Leave() error {
	return a.delegate.Leave()
}
|
|
|
|
|
2017-06-20 07:29:20 +00:00
|
|
|
// ShutdownAgent is used to hard stop the agent. Should be preceded by
// Leave to do it gracefully. Should be followed by ShutdownEndpoints to
// terminate the HTTP and DNS servers as well.
//
// This method is idempotent: a second call returns nil immediately.
func (a *Agent) ShutdownAgent() error {
	a.shutdownLock.Lock()
	defer a.shutdownLock.Unlock()

	// Already shut down; nothing to do.
	if a.shutdown {
		return nil
	}
	a.logger.Println("[INFO] agent: Requesting shutdown")

	// Stop all the checks
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	for _, chk := range a.checkMonitors {
		chk.Stop()
	}
	for _, chk := range a.checkTTLs {
		chk.Stop()
	}
	for _, chk := range a.checkHTTPs {
		chk.Stop()
	}
	for _, chk := range a.checkTCPs {
		chk.Stop()
	}
	for _, chk := range a.checkGRPCs {
		chk.Stop()
	}
	for _, chk := range a.checkDockers {
		chk.Stop()
	}
	for _, chk := range a.checkAliases {
		chk.Stop()
	}

	// Stop gRPC
	if a.grpcServer != nil {
		a.grpcServer.Stop()
	}

	// Stop the proxy config manager
	if a.proxyConfig != nil {
		a.proxyConfig.Close()
	}

	// Stop the proxy process manager
	if a.proxyManager != nil {
		// If persistence is disabled (implies DevMode but a subset of DevMode) then
		// don't leave the proxies running since the agent will not be able to
		// recover them later.
		if a.config.DataDir == "" {
			a.logger.Printf("[WARN] agent: dev mode disabled persistence, killing " +
				"all proxies since we can't recover them")
			if err := a.proxyManager.Kill(); err != nil {
				a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err)
			}
		} else {
			if err := a.proxyManager.Close(); err != nil {
				a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err)
			}
		}
	}

	// Stop the cache background work
	if a.cache != nil {
		a.cache.Close()
	}

	// Shut down the underlying consul client/server last; its error (if
	// any) is what this method ultimately returns.
	var err error
	if a.delegate != nil {
		err = a.delegate.Shutdown()
		if _, ok := a.delegate.(*consul.Server); ok {
			a.logger.Print("[INFO] agent: consul server down")
		} else {
			a.logger.Print("[INFO] agent: consul client down")
		}
	}

	// Best-effort removal of the pid file; a failure is logged but does
	// not fail the shutdown.
	pidErr := a.deletePid()
	if pidErr != nil {
		a.logger.Println("[WARN] agent: could not delete pid file ", pidErr)
	}

	a.logger.Println("[INFO] agent: shutdown complete")
	a.shutdown = true
	close(a.shutdownCh)
	return err
}
|
|
|
|
|
|
|
|
// ShutdownEndpoints terminates the HTTP and DNS servers. Should be
|
2018-03-19 16:56:00 +00:00
|
|
|
// preceded by ShutdownAgent.
|
2017-06-20 07:29:20 +00:00
|
|
|
func (a *Agent) ShutdownEndpoints() {
|
|
|
|
a.shutdownLock.Lock()
|
|
|
|
defer a.shutdownLock.Unlock()
|
|
|
|
|
2018-03-29 13:45:46 +00:00
|
|
|
if len(a.dnsServers) == 0 && len(a.httpServers) == 0 {
|
2017-06-20 07:29:20 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, srv := range a.dnsServers {
|
|
|
|
a.logger.Printf("[INFO] agent: Stopping DNS server %s (%s)", srv.Server.Addr, srv.Server.Net)
|
|
|
|
srv.Shutdown()
|
|
|
|
}
|
|
|
|
a.dnsServers = nil
|
|
|
|
|
|
|
|
for _, srv := range a.httpServers {
|
2017-11-07 23:06:59 +00:00
|
|
|
a.logger.Printf("[INFO] agent: Stopping %s server %s (%s)", strings.ToUpper(srv.proto), srv.ln.Addr().String(), srv.ln.Addr().Network())
|
2017-06-20 07:29:20 +00:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
|
|
|
defer cancel()
|
|
|
|
srv.Shutdown(ctx)
|
|
|
|
if ctx.Err() == context.DeadlineExceeded {
|
2017-11-07 23:06:59 +00:00
|
|
|
a.logger.Printf("[WARN] agent: Timeout stopping %s server %s (%s)", strings.ToUpper(srv.proto), srv.ln.Addr().String(), srv.ln.Addr().Network())
|
2017-06-20 07:29:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
a.httpServers = nil
|
|
|
|
|
|
|
|
a.logger.Println("[INFO] agent: Waiting for endpoints to shut down")
|
|
|
|
a.wgServers.Wait()
|
|
|
|
a.logger.Print("[INFO] agent: Endpoints down")
|
2013-12-21 00:39:32 +00:00
|
|
|
}
|
|
|
|
|
2017-05-19 15:51:39 +00:00
|
|
|
// ReloadCh is used to return a channel that can be
// used for triggering reloads and returning a response.
func (a *Agent) ReloadCh() chan chan error {
	return a.reloadCh
}
|
|
|
|
|
2017-06-02 09:55:29 +00:00
|
|
|
// RetryJoinCh is a channel that transports errors
// from the retry join process.
func (a *Agent) RetryJoinCh() <-chan error {
	return a.retryJoinCh
}
|
|
|
|
|
2014-04-18 05:46:31 +00:00
|
|
|
// ShutdownCh is used to return a channel that can be
// selected to wait for the agent to perform a shutdown.
func (a *Agent) ShutdownCh() <-chan struct{} {
	return a.shutdownCh
}
|
2013-12-30 22:42:41 +00:00
|
|
|
|
|
|
|
// JoinLAN is used to have the agent join a LAN cluster
|
|
|
|
func (a *Agent) JoinLAN(addrs []string) (n int, err error) {
|
|
|
|
a.logger.Printf("[INFO] agent: (LAN) joining: %v", addrs)
|
2017-05-15 14:05:17 +00:00
|
|
|
n, err = a.delegate.JoinLAN(addrs)
|
2019-05-24 14:50:18 +00:00
|
|
|
if err == nil {
|
|
|
|
a.logger.Printf("[INFO] agent: (LAN) joined: %d", n)
|
|
|
|
if a.joinLANNotifier != nil {
|
|
|
|
if notifErr := a.joinLANNotifier.Notify(systemd.Ready); notifErr != nil {
|
|
|
|
a.logger.Printf("[DEBUG] agent: systemd notify failed: %v", notifErr)
|
|
|
|
}
|
2017-06-21 04:43:55 +00:00
|
|
|
}
|
2019-05-24 14:50:18 +00:00
|
|
|
} else {
|
|
|
|
a.logger.Printf("[WARN] agent: (LAN) couldn't join: %d Err: %v", n, err)
|
2017-06-21 04:43:55 +00:00
|
|
|
}
|
2013-12-30 22:42:41 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// JoinWAN is used to have the agent join a WAN cluster
|
|
|
|
func (a *Agent) JoinWAN(addrs []string) (n int, err error) {
|
|
|
|
a.logger.Printf("[INFO] agent: (WAN) joining: %v", addrs)
|
2017-05-15 14:05:17 +00:00
|
|
|
if srv, ok := a.delegate.(*consul.Server); ok {
|
|
|
|
n, err = srv.JoinWAN(addrs)
|
2013-12-30 22:42:41 +00:00
|
|
|
} else {
|
|
|
|
err = fmt.Errorf("Must be a server to join WAN cluster")
|
|
|
|
}
|
2019-05-24 14:50:18 +00:00
|
|
|
if err == nil {
|
|
|
|
a.logger.Printf("[INFO] agent: (WAN) joined: %d", n)
|
|
|
|
} else {
|
|
|
|
a.logger.Printf("[WARN] agent: (WAN) couldn't join: %d Err: %v", n, err)
|
|
|
|
}
|
2013-12-30 22:42:41 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// ForceLeave is used to remove a failed node from the cluster
|
|
|
|
func (a *Agent) ForceLeave(node string) (err error) {
|
2018-03-21 15:56:14 +00:00
|
|
|
a.logger.Printf("[INFO] agent: Force leaving node: %v", node)
|
2017-05-15 14:05:17 +00:00
|
|
|
err = a.delegate.RemoveFailedNode(node)
|
2013-12-30 22:42:41 +00:00
|
|
|
if err != nil {
|
2018-03-21 15:56:14 +00:00
|
|
|
a.logger.Printf("[WARN] agent: Failed to remove node: %v", err)
|
2013-12-30 22:42:41 +00:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2014-05-25 23:59:48 +00:00
|
|
|
// LocalMember is used to return the local node
// as seen by the underlying serf member list.
func (a *Agent) LocalMember() serf.Member {
	return a.delegate.LocalMember()
}
|
|
|
|
|
2014-04-18 05:46:31 +00:00
|
|
|
// LANMembers is used to retrieve the LAN members
// from the underlying client or server delegate.
func (a *Agent) LANMembers() []serf.Member {
	return a.delegate.LANMembers()
}
|
|
|
|
|
2014-04-18 05:46:31 +00:00
|
|
|
// WANMembers is used to retrieve the WAN members
|
2013-12-30 22:42:41 +00:00
|
|
|
func (a *Agent) WANMembers() []serf.Member {
|
2017-05-15 14:05:17 +00:00
|
|
|
if srv, ok := a.delegate.(*consul.Server); ok {
|
|
|
|
return srv.WANMembers()
|
2013-12-30 22:42:41 +00:00
|
|
|
}
|
2017-04-21 01:59:42 +00:00
|
|
|
return nil
|
2013-12-30 22:42:41 +00:00
|
|
|
}
|
2014-01-21 19:52:25 +00:00
|
|
|
|
|
|
|
// StartSync is called once Services and Checks are registered.
// This is called to prevent a race between clients and the anti-entropy routines
func (a *Agent) StartSync() {
	// Run the state syncer in the background for the agent's lifetime.
	go a.sync.Run()
	a.logger.Printf("[INFO] agent: started state syncer")
}
|
2014-01-30 21:39:02 +00:00
|
|
|
|
2018-09-27 14:00:51 +00:00
|
|
|
// PauseSync is used to pause anti-entropy while bulk changes are made. It also
// sets state that agent-local watches use to "ride out" config reloads and bulk
// updates which might spuriously unload state and reload it again.
func (a *Agent) PauseSync() {
	// Do this outside of lock as it has its own locking
	a.sync.Pause()

	// Coordinate local state watchers: lazily create the channel that
	// syncPausedCh hands out; it stays open until ResumeSync closes it.
	a.syncMu.Lock()
	defer a.syncMu.Unlock()
	if a.syncCh == nil {
		a.syncCh = make(chan struct{})
	}
}
|
|
|
|
|
2014-04-18 05:46:31 +00:00
|
|
|
// ResumeSync is used to unpause anti-entropy after bulk changes are made
func (a *Agent) ResumeSync() {
	// a.sync maintains a stack/ref count of Pause calls since we call
	// Pause/Resume in nested way during a reload and AddService. We only want to
	// trigger local state watchers if this Resume call actually started sync back
	// up again (i.e. was the last resume on the stack). We could check that
	// separately with a.sync.Paused but that is racy since another Pause call
	// might be made between our Resume and checking Paused.
	resumed := a.sync.Resume()

	if !resumed {
		// Return early so we don't notify local watchers until we are actually
		// resumed.
		return
	}

	// Coordinate local state watchers: closing syncCh signals every watcher
	// blocked on syncPausedCh that the pause is over.
	a.syncMu.Lock()
	defer a.syncMu.Unlock()

	if a.syncCh != nil {
		close(a.syncCh)
		a.syncCh = nil
	}
}
|
|
|
|
|
|
|
|
// syncPausedCh returns either a channel or nil. If nil sync is not paused. If
// non-nil, the channel will be closed when sync resumes.
func (a *Agent) syncPausedCh() <-chan struct{} {
	// Snapshot the channel under the lock; callers may safely receive from
	// the returned channel after the lock is released.
	a.syncMu.Lock()
	defer a.syncMu.Unlock()
	return a.syncCh
}
|
|
|
|
|
2017-08-14 14:36:07 +00:00
|
|
|
// GetLANCoordinate returns the coordinates of this node in the local pools
|
|
|
|
// (assumes coordinates are enabled, so check that before calling).
|
|
|
|
func (a *Agent) GetLANCoordinate() (lib.CoordinateSet, error) {
|
2017-05-15 14:05:17 +00:00
|
|
|
return a.delegate.GetLANCoordinate()
|
2015-10-16 02:28:31 +00:00
|
|
|
}
|
|
|
|
|
2015-06-06 03:31:33 +00:00
|
|
|
// sendCoordinate is a long-running loop that periodically sends our coordinate
// to the server. Closing the agent's shutdownChannel will cause this to exit.
func (a *Agent) sendCoordinate() {
OUTER:
	for {
		// Scale the update interval with cluster size so the servers aren't
		// overwhelmed, and add a random stagger to spread updates out.
		rate := a.config.SyncCoordinateRateTarget
		min := a.config.SyncCoordinateIntervalMin
		intv := lib.RateScaledInterval(rate, min, len(a.LANMembers()))
		intv = intv + lib.RandomStagger(intv)

		select {
		case <-time.After(intv):
			// Coordinate updates need protocol version 3+; skip the update
			// (but keep looping) until all servers are upgraded.
			members := a.LANMembers()
			grok, err := consul.CanServersUnderstandProtocol(members, 3)
			if err != nil {
				a.logger.Printf("[ERR] agent: Failed to check servers: %s", err)
				continue
			}
			if !grok {
				a.logger.Printf("[DEBUG] agent: Skipping coordinate updates until servers are upgraded")
				continue
			}

			cs, err := a.GetLANCoordinate()
			if err != nil {
				a.logger.Printf("[ERR] agent: Failed to get coordinate: %s", err)
				continue
			}

			// One update RPC per network segment; on any failure, abandon the
			// remaining segments for this round (continue OUTER) and retry on
			// the next tick.
			for segment, coord := range cs {
				req := structs.CoordinateUpdateRequest{
					Datacenter:   a.config.Datacenter,
					Node:         a.config.NodeName,
					Segment:      segment,
					Coord:        coord,
					WriteRequest: structs.WriteRequest{Token: a.tokens.AgentToken()},
				}
				var reply struct{}
				if err := a.RPC("Coordinate.Update", &req, &reply); err != nil {
					if acl.IsErrPermissionDenied(err) {
						a.logger.Printf("[WARN] agent: Coordinate update blocked by ACLs")
					} else {
						a.logger.Printf("[ERR] agent: Coordinate update error: %v", err)
					}
					continue OUTER
				}
			}
		case <-a.shutdownCh:
			return
		}
	}
}
|
|
|
|
|
2016-08-16 19:52:30 +00:00
|
|
|
// reapServicesInternal does a single pass, looking for services to reap.
func (a *Agent) reapServicesInternal() {
	reaped := make(map[string]bool)
	for checkID, cs := range a.State.CriticalCheckStates() {
		serviceID := cs.Check.ServiceID

		// There's nothing to do if there's no service.
		if serviceID == "" {
			continue
		}

		// There might be multiple checks for one service, so
		// we don't need to reap multiple times.
		if reaped[serviceID] {
			continue
		}

		// See if there's a timeout.
		// todo(fs): this looks fishy... why is there another data structure in the agent with its own lock?
		// NOTE: the lock is released before calling RemoveService below, which
		// re-acquires it; holding it across the call would deadlock.
		a.stateLock.Lock()
		timeout := a.checkReapAfter[checkID]
		a.stateLock.Unlock()

		// Reap, if necessary. We keep track of which service
		// this is so that we won't try to remove it again.
		if timeout > 0 && cs.CriticalFor() > timeout {
			reaped[serviceID] = true
			if err := a.RemoveService(serviceID, true); err != nil {
				a.logger.Printf("[ERR] agent: unable to deregister service %q after check %q has been critical for too long: %s",
					serviceID, checkID, err)
			} else {
				a.logger.Printf("[INFO] agent: Check %q for service %q has been critical for too long; deregistered service",
					checkID, serviceID)
			}
		}
	}
}
|
2016-08-16 07:05:55 +00:00
|
|
|
|
2016-08-16 19:52:30 +00:00
|
|
|
// reapServices is a long running goroutine that looks for checks that have been
|
2017-10-26 02:17:41 +00:00
|
|
|
// critical too long and deregisters their associated services.
|
2016-08-16 19:52:30 +00:00
|
|
|
func (a *Agent) reapServices() {
|
2016-08-16 07:05:55 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-time.After(a.config.CheckReapInterval):
|
2016-08-16 19:52:30 +00:00
|
|
|
a.reapServicesInternal()
|
2016-08-16 07:05:55 +00:00
|
|
|
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2017-06-15 16:46:06 +00:00
|
|
|
// persistedService is used to wrap a service definition and bundle it
// with an ACL token so we can restore both at a later agent start.
// NOTE: field order is part of the on-disk JSON layout; do not reorder.
type persistedService struct {
	// Token is the ACL token the service was registered with.
	Token string
	// Service is the full service definition to restore.
	Service *structs.NodeService
}
|
|
|
|
|
2014-11-24 08:36:03 +00:00
|
|
|
// persistService saves a service definition to a JSON file in the data dir
|
|
|
|
func (a *Agent) persistService(service *structs.NodeService) error {
|
2015-01-08 03:11:21 +00:00
|
|
|
svcPath := filepath.Join(a.config.DataDir, servicesDir, stringHash(service.ID))
|
2016-11-07 18:51:03 +00:00
|
|
|
|
2015-05-06 05:08:03 +00:00
|
|
|
wrapped := persistedService{
|
2017-08-28 12:17:13 +00:00
|
|
|
Token: a.State.ServiceToken(service.ID),
|
2015-05-06 05:08:03 +00:00
|
|
|
Service: service,
|
|
|
|
}
|
|
|
|
encoded, err := json.Marshal(wrapped)
|
|
|
|
if err != nil {
|
2016-04-26 22:03:26 +00:00
|
|
|
return err
|
2015-05-06 05:08:03 +00:00
|
|
|
}
|
2016-11-07 18:51:03 +00:00
|
|
|
|
2018-05-03 20:56:42 +00:00
|
|
|
return file.WriteAtomic(svcPath, encoded)
|
2014-11-24 08:36:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// purgeService removes a persisted service definition file from the data dir
|
|
|
|
func (a *Agent) purgeService(serviceID string) error {
|
2015-01-08 03:11:21 +00:00
|
|
|
svcPath := filepath.Join(a.config.DataDir, servicesDir, stringHash(serviceID))
|
2014-11-24 08:36:03 +00:00
|
|
|
if _, err := os.Stat(svcPath); err == nil {
|
|
|
|
return os.Remove(svcPath)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-05-14 20:55:24 +00:00
|
|
|
// persistedProxy is used to wrap a proxy definition and bundle it with an Proxy
// token so we can continue to authenticate the running proxy after a restart.
// NOTE: field order is part of the on-disk JSON layout; do not reorder.
type persistedProxy struct {
	// ProxyToken is the ACL token issued to the managed proxy.
	ProxyToken string
	// Proxy is the full managed proxy definition to restore.
	Proxy *structs.ConnectManagedProxy

	// Set to true when the proxy information originated from the agents configuration
	// as opposed to API registration.
	FromFile bool
}
|
|
|
|
|
|
|
|
// persistProxy saves a proxy definition to a JSON file in the data dir
|
2018-07-17 20:16:43 +00:00
|
|
|
func (a *Agent) persistProxy(proxy *local.ManagedProxy, FromFile bool) error {
|
2018-05-14 20:55:24 +00:00
|
|
|
proxyPath := filepath.Join(a.config.DataDir, proxyDir,
|
|
|
|
stringHash(proxy.Proxy.ProxyService.ID))
|
|
|
|
|
|
|
|
wrapped := persistedProxy{
|
|
|
|
ProxyToken: proxy.ProxyToken,
|
|
|
|
Proxy: proxy.Proxy,
|
2018-07-17 20:16:43 +00:00
|
|
|
FromFile: FromFile,
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
|
|
|
encoded, err := json.Marshal(wrapped)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return file.WriteAtomic(proxyPath, encoded)
|
|
|
|
}
|
|
|
|
|
|
|
|
// purgeProxy removes a persisted proxy definition file from the data dir
|
|
|
|
func (a *Agent) purgeProxy(proxyID string) error {
|
|
|
|
proxyPath := filepath.Join(a.config.DataDir, proxyDir, stringHash(proxyID))
|
|
|
|
if _, err := os.Stat(proxyPath); err == nil {
|
|
|
|
return os.Remove(proxyPath)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-11-24 08:36:03 +00:00
|
|
|
// persistCheck saves a check definition to the local agent's state directory
|
2017-06-15 16:46:06 +00:00
|
|
|
func (a *Agent) persistCheck(check *structs.HealthCheck, chkType *structs.CheckType) error {
|
2016-06-06 08:53:30 +00:00
|
|
|
checkPath := filepath.Join(a.config.DataDir, checksDir, checkIDHash(check.CheckID))
|
2014-11-29 20:25:01 +00:00
|
|
|
|
|
|
|
// Create the persisted check
|
2015-04-28 19:44:46 +00:00
|
|
|
wrapped := persistedCheck{
|
|
|
|
Check: check,
|
|
|
|
ChkType: chkType,
|
2017-08-28 12:17:13 +00:00
|
|
|
Token: a.State.CheckToken(check.CheckID),
|
2015-04-28 19:44:46 +00:00
|
|
|
}
|
2014-11-29 20:25:01 +00:00
|
|
|
|
2015-04-28 19:44:46 +00:00
|
|
|
encoded, err := json.Marshal(wrapped)
|
2014-11-29 20:25:01 +00:00
|
|
|
if err != nil {
|
2016-04-26 22:03:26 +00:00
|
|
|
return err
|
2014-11-29 20:25:01 +00:00
|
|
|
}
|
2016-11-07 18:51:03 +00:00
|
|
|
|
2018-05-03 20:56:42 +00:00
|
|
|
return file.WriteAtomic(checkPath, encoded)
|
2014-11-24 08:36:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// purgeCheck removes a persisted check definition file from the data dir
|
2016-06-06 20:19:31 +00:00
|
|
|
func (a *Agent) purgeCheck(checkID types.CheckID) error {
|
2016-06-06 08:53:30 +00:00
|
|
|
checkPath := filepath.Join(a.config.DataDir, checksDir, checkIDHash(checkID))
|
2014-11-24 08:36:03 +00:00
|
|
|
if _, err := os.Stat(checkPath); err == nil {
|
|
|
|
return os.Remove(checkPath)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
// AddService is used to add a service entry.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered
func (a *Agent) AddService(service *structs.NodeService, chkTypes []*structs.CheckType, persist bool, token string, source configSource) error {
	// Serialize all service/check registration under the state lock and
	// delegate to the lock-assuming implementation.
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	return a.addServiceLocked(service, chkTypes, persist, token, source)
}
|
|
|
|
|
2019-04-24 13:11:08 +00:00
|
|
|
// addServiceLocked adds a service entry to the service manager if enabled, or directly
|
|
|
|
// to the local state if it is not. This function assumes the state lock is already held.
|
2019-03-04 14:34:05 +00:00
|
|
|
func (a *Agent) addServiceLocked(service *structs.NodeService, chkTypes []*structs.CheckType, persist bool, token string, source configSource) error {
|
2019-04-23 06:39:02 +00:00
|
|
|
if err := a.validateService(service, chkTypes); err != nil {
|
|
|
|
return err
|
2019-01-08 10:13:49 +00:00
|
|
|
}
|
|
|
|
|
2019-04-24 13:11:08 +00:00
|
|
|
if a.config.EnableCentralServiceConfig {
|
|
|
|
return a.serviceManager.AddService(service, chkTypes, persist, token, source)
|
2015-02-09 17:22:51 +00:00
|
|
|
}
|
|
|
|
|
2019-04-24 13:11:08 +00:00
|
|
|
return a.addServiceInternal(service, chkTypes, persist, token, source)
|
2019-04-23 06:39:02 +00:00
|
|
|
}
|
2015-02-09 17:30:06 +00:00
|
|
|
|
2019-04-24 13:11:08 +00:00
|
|
|
// addServiceInternal adds the given service and checks to the local state.
func (a *Agent) addServiceInternal(service *structs.NodeService, chkTypes []*structs.CheckType, persist bool, token string, source configSource) error {
	// Pause the service syncs during modification
	a.PauseSync()
	defer a.ResumeSync()

	// Take a snapshot of the current state of checks (if any), and when adding
	// a check that already existed carry over the state before resuming
	// anti-entropy.
	snap := a.snapshotCheckState()

	var checks []*structs.HealthCheck

	// Create an associated health check for each check type. Checks without
	// an explicit ID get "service:<id>" (suffixed with an index when there
	// is more than one).
	for i, chkType := range chkTypes {
		checkID := string(chkType.CheckID)
		if checkID == "" {
			checkID = fmt.Sprintf("service:%s", service.ID)
			if len(chkTypes) > 1 {
				checkID += fmt.Sprintf(":%d", i+1)
			}
		}
		name := chkType.Name
		if name == "" {
			name = fmt.Sprintf("Service '%s' check", service.Service)
		}
		check := &structs.HealthCheck{
			Node:        a.config.NodeName,
			CheckID:     types.CheckID(checkID),
			Name:        name,
			Status:      api.HealthCritical,
			Notes:       chkType.Notes,
			ServiceID:   service.ID,
			ServiceName: service.Service,
			ServiceTags: service.Tags,
		}
		if chkType.Status != "" {
			check.Status = chkType.Status
		}

		// Restore the fields from the snapshot.
		prev, ok := snap[check.CheckID]
		if ok {
			check.Output = prev.Output
			check.Status = prev.Status
		}

		checks = append(checks, check)
	}

	// cleanup, store the ids of services and checks that weren't previously
	// registered so we clean them up if something fails halfway through the
	// process.
	var cleanupServices []string
	var cleanupChecks []types.CheckID

	if s := a.State.Service(service.ID); s == nil {
		cleanupServices = append(cleanupServices, service.ID)
	}

	for _, check := range checks {
		if c := a.State.Check(check.CheckID); c == nil {
			cleanupChecks = append(cleanupChecks, check.CheckID)
		}
	}

	err := a.State.AddServiceWithChecks(service, checks, token)
	if err != nil {
		a.cleanupRegistration(cleanupServices, cleanupChecks)
		return err
	}

	// Start the check monitors and persist each check; any failure rolls
	// back whatever this call newly registered.
	for i := range checks {
		if err := a.addCheck(checks[i], chkTypes[i], service, persist, token, source); err != nil {
			a.cleanupRegistration(cleanupServices, cleanupChecks)
			return err
		}

		if persist && a.config.DataDir != "" {
			if err := a.persistCheck(checks[i], chkTypes[i]); err != nil {
				a.cleanupRegistration(cleanupServices, cleanupChecks)
				return err
			}
		}
	}

	// Persist the service to a file
	if persist && a.config.DataDir != "" {
		if err := a.persistService(service); err != nil {
			a.cleanupRegistration(cleanupServices, cleanupChecks)
			return err
		}
	}

	return nil
}
|
|
|
|
|
2019-04-23 06:39:02 +00:00
|
|
|
// validateService validates an service and its checks, either returning an error or emitting a
|
|
|
|
// warning based on the nature of the error.
|
|
|
|
func (a *Agent) validateService(service *structs.NodeService, chkTypes []*structs.CheckType) error {
|
|
|
|
if service.Service == "" {
|
|
|
|
return fmt.Errorf("Service name missing")
|
|
|
|
}
|
|
|
|
if service.ID == "" && service.Service != "" {
|
|
|
|
service.ID = service.Service
|
|
|
|
}
|
|
|
|
for _, check := range chkTypes {
|
|
|
|
if err := check.Validate(); err != nil {
|
|
|
|
return fmt.Errorf("Check is not valid: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set default weights if not specified. This is important as it ensures AE
|
|
|
|
// doesn't consider the service different since it has nil weights.
|
|
|
|
if service.Weights == nil {
|
|
|
|
service.Weights = &structs.Weights{Passing: 1, Warning: 1}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Warn if the service name is incompatible with DNS
|
|
|
|
if InvalidDnsRe.MatchString(service.Service) {
|
|
|
|
a.logger.Printf("[WARN] agent: Service name %q will not be discoverable "+
|
|
|
|
"via DNS due to invalid characters. Valid characters include "+
|
|
|
|
"all alpha-numerics and dashes.", service.Service)
|
|
|
|
} else if len(service.Service) > MaxDNSLabelLength {
|
|
|
|
a.logger.Printf("[WARN] agent: Service name %q will not be discoverable "+
|
|
|
|
"via DNS due to it being too long. Valid lengths are between "+
|
|
|
|
"1 and 63 bytes.", service.Service)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Warn if any tags are incompatible with DNS
|
|
|
|
for _, tag := range service.Tags {
|
|
|
|
if InvalidDnsRe.MatchString(tag) {
|
|
|
|
a.logger.Printf("[DEBUG] agent: Service tag %q will not be discoverable "+
|
|
|
|
"via DNS due to invalid characters. Valid characters include "+
|
|
|
|
"all alpha-numerics and dashes.", tag)
|
|
|
|
} else if len(tag) > MaxDNSLabelLength {
|
|
|
|
a.logger.Printf("[DEBUG] agent: Service tag %q will not be discoverable "+
|
|
|
|
"via DNS due to it being too long. Valid lengths are between "+
|
|
|
|
"1 and 63 bytes.", tag)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-03-04 14:34:05 +00:00
|
|
|
// cleanupRegistration is called on registration error to ensure no there are no
|
|
|
|
// leftovers after a partial failure
|
|
|
|
func (a *Agent) cleanupRegistration(serviceIDs []string, checksIDs []types.CheckID) {
|
|
|
|
for _, s := range serviceIDs {
|
|
|
|
if err := a.State.RemoveService(s); err != nil {
|
|
|
|
a.logger.Printf("[ERR] consul: service registration: cleanup: failed to remove service %s: %s", s, err)
|
|
|
|
}
|
|
|
|
if err := a.purgeService(s); err != nil {
|
|
|
|
a.logger.Printf("[ERR] consul: service registration: cleanup: failed to purge service %s file: %s", s, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range checksIDs {
|
|
|
|
a.cancelCheckMonitors(c)
|
|
|
|
if err := a.State.RemoveCheck(c); err != nil {
|
|
|
|
a.logger.Printf("[ERR] consul: service registration: cleanup: failed to remove check %s: %s", c, err)
|
|
|
|
}
|
|
|
|
if err := a.purgeCheck(c); err != nil {
|
|
|
|
a.logger.Printf("[ERR] consul: service registration: cleanup: failed to purge check %s file: %s", c, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
// RemoveService is used to remove a service entry.
// The agent will make a best effort to ensure it is deregistered
func (a *Agent) RemoveService(serviceID string, persist bool) error {
	// Serialize removal under the state lock and delegate to the
	// lock-assuming implementation.
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	return a.removeServiceLocked(serviceID, persist)
}
|
|
|
|
|
|
|
|
// removeServiceLocked is used to remove a service entry.
// The agent will make a best effort to ensure it is deregistered
func (a *Agent) removeServiceLocked(serviceID string, persist bool) error {
	// Validate ServiceID
	if serviceID == "" {
		return fmt.Errorf("ServiceID missing")
	}

	// Shut down the config watch in the service manager if enabled.
	if a.config.EnableCentralServiceConfig {
		a.serviceManager.RemoveService(serviceID)
	}

	// Collect the IDs of all checks attached to this service before any
	// state is mutated.
	checks := a.State.Checks()
	var checkIDs []types.CheckID
	for id, check := range checks {
		if check.ServiceID != serviceID {
			continue
		}
		checkIDs = append(checkIDs, id)
	}

	// Remove the associated managed proxy if it exists.
	// This has to be done before purging configuration as it might have
	// issues with ACLs otherwise.
	for proxyID, p := range a.State.Proxies() {
		if p.Proxy.TargetServiceID == serviceID {
			if err := a.removeProxyLocked(proxyID, true); err != nil {
				return err
			}
		}
	}

	// Remove service immediately
	if err := a.State.RemoveServiceWithChecks(serviceID, checkIDs); err != nil {
		// Best effort: log and report success so callers are not blocked by
		// a service that is already gone from local state.
		a.logger.Printf("[WARN] agent: Failed to deregister service %q: %s", serviceID, err)
		return nil
	}

	// Remove the service from the data dir
	if persist {
		if err := a.purgeService(serviceID); err != nil {
			return err
		}
	}

	// Deregister any associated health checks
	for checkID, check := range checks {
		if check.ServiceID != serviceID {
			continue
		}
		if err := a.removeCheckLocked(checkID, persist); err != nil {
			return err
		}
	}

	a.logger.Printf("[DEBUG] agent: removed service %q", serviceID)

	// If any Sidecar services exist for the removed service ID, remove them too.
	if sidecar := a.State.Service(a.sidecarServiceID(serviceID)); sidecar != nil {
		// Double check that it's not just an ID collision and we actually added
		// this from a sidecar.
		if sidecar.LocallyRegisteredAsSidecar {
			// Remove it! (recursion terminates because the sidecar itself has
			// no locally-registered sidecar of its own)
			err := a.removeServiceLocked(a.sidecarServiceID(serviceID), persist)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
|
|
|
|
|
|
|
|
// AddCheck is used to add a health check to the agent.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered. The Check may include a CheckType which
// is used to automatically update the check status
func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType, persist bool, token string, source configSource) error {
	// Serialize check registration under the state lock and delegate to the
	// lock-assuming implementation.
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	return a.addCheckLocked(check, chkType, persist, token, source)
}
|
|
|
|
|
|
|
|
// addCheckLocked registers a health check, starts its monitor, records it in
// local state, and optionally persists it. Assumes the state lock is held.
func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.CheckType, persist bool, token string, source configSource) error {
	var service *structs.NodeService

	// If the check is tied to a service, that service must already exist.
	if check.ServiceID != "" {
		service = a.State.Service(check.ServiceID)
		if service == nil {
			return fmt.Errorf("ServiceID %q does not exist", check.ServiceID)
		}
	}

	// snapshot the current state of the health check to avoid potential flapping
	existing := a.State.Check(check.CheckID)
	defer func() {
		// Re-apply the pre-existing status/output after registration so
		// re-registering a check does not reset it to critical.
		if existing != nil {
			a.State.UpdateCheck(check.CheckID, existing.Status, existing.Output)
		}
	}()

	err := a.addCheck(check, chkType, service, persist, token, source)
	if err != nil {
		// Roll back the state entry if starting the check monitor failed.
		a.State.RemoveCheck(check.CheckID)
		return err
	}

	// Add to the local state for anti-entropy
	err = a.State.AddCheck(check, token)
	if err != nil {
		return err
	}

	// Persist the check
	if persist && a.config.DataDir != "" {
		return a.persistCheck(check, chkType)
	}

	return nil
}
|
|
|
|
|
|
|
|
func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, service *structs.NodeService, persist bool, token string, source configSource) error {
|
2014-01-30 21:39:02 +00:00
|
|
|
if check.CheckID == "" {
|
|
|
|
return fmt.Errorf("CheckID missing")
|
|
|
|
}
|
2017-07-17 18:20:35 +00:00
|
|
|
|
|
|
|
if chkType != nil {
|
2017-10-10 23:54:06 +00:00
|
|
|
if err := chkType.Validate(); err != nil {
|
|
|
|
return fmt.Errorf("Check is not valid: %v", err)
|
2017-07-17 18:20:35 +00:00
|
|
|
}
|
|
|
|
|
2018-10-11 12:22:11 +00:00
|
|
|
if chkType.IsScript() {
|
|
|
|
if source == ConfigSourceLocal && !a.config.EnableLocalScriptChecks {
|
|
|
|
return fmt.Errorf("Scripts are disabled on this agent; to enable, configure 'enable_script_checks' or 'enable_local_script_checks' to true")
|
|
|
|
}
|
|
|
|
|
|
|
|
if source == ConfigSourceRemote && !a.config.EnableRemoteScriptChecks {
|
|
|
|
return fmt.Errorf("Scripts are disabled on this agent from remote calls; to enable, configure 'enable_script_checks' to true")
|
|
|
|
}
|
2017-07-17 18:20:35 +00:00
|
|
|
}
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2015-01-14 01:52:17 +00:00
|
|
|
if check.ServiceID != "" {
|
2019-03-04 14:34:05 +00:00
|
|
|
check.ServiceName = service.Service
|
|
|
|
check.ServiceTags = service.Tags
|
2015-01-14 01:52:17 +00:00
|
|
|
}
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
// Check if already registered
|
|
|
|
if chkType != nil {
|
2019-06-26 15:43:25 +00:00
|
|
|
maxOutputSize := a.config.CheckOutputMaxSize
|
|
|
|
if maxOutputSize == 0 {
|
|
|
|
maxOutputSize = checks.DefaultBufSize
|
|
|
|
}
|
|
|
|
if chkType.OutputMaxSize > 0 && maxOutputSize > chkType.OutputMaxSize {
|
|
|
|
maxOutputSize = chkType.OutputMaxSize
|
|
|
|
}
|
2017-07-12 14:01:42 +00:00
|
|
|
switch {
|
|
|
|
|
|
|
|
case chkType.IsTTL():
|
2014-06-17 23:48:19 +00:00
|
|
|
if existing, ok := a.checkTTLs[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
2017-07-12 14:01:42 +00:00
|
|
|
delete(a.checkTTLs, check.CheckID)
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2017-10-25 09:18:07 +00:00
|
|
|
ttl := &checks.CheckTTL{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: a.State,
|
|
|
|
CheckID: check.CheckID,
|
|
|
|
TTL: chkType.TTL,
|
|
|
|
Logger: a.logger,
|
|
|
|
OutputMaxSize: maxOutputSize,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2015-06-05 23:17:07 +00:00
|
|
|
|
|
|
|
// Restore persisted state, if any
|
2015-06-08 16:35:10 +00:00
|
|
|
if err := a.loadCheckState(check); err != nil {
|
2015-06-05 23:17:07 +00:00
|
|
|
a.logger.Printf("[WARN] agent: failed restoring state for check %q: %s",
|
|
|
|
check.CheckID, err)
|
|
|
|
}
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
ttl.Start()
|
|
|
|
a.checkTTLs[check.CheckID] = ttl
|
|
|
|
|
2017-07-12 14:01:42 +00:00
|
|
|
case chkType.IsHTTP():
|
2015-01-09 22:43:24 +00:00
|
|
|
if existing, ok := a.checkHTTPs[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
2017-07-12 14:01:42 +00:00
|
|
|
delete(a.checkHTTPs, check.CheckID)
|
2015-01-09 22:43:24 +00:00
|
|
|
}
|
2017-10-25 09:18:07 +00:00
|
|
|
if chkType.Interval < checks.MinInterval {
|
2015-01-09 22:43:24 +00:00
|
|
|
a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
|
2017-10-25 09:18:07 +00:00
|
|
|
check.CheckID, checks.MinInterval))
|
|
|
|
chkType.Interval = checks.MinInterval
|
2015-01-09 22:43:24 +00:00
|
|
|
}
|
|
|
|
|
2019-03-13 09:29:06 +00:00
|
|
|
tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
|
2017-11-08 02:22:09 +00:00
|
|
|
|
2017-10-25 09:18:07 +00:00
|
|
|
http := &checks.CheckHTTP{
|
2017-11-08 02:22:09 +00:00
|
|
|
Notify: a.State,
|
|
|
|
CheckID: check.CheckID,
|
|
|
|
HTTP: chkType.HTTP,
|
|
|
|
Header: chkType.Header,
|
|
|
|
Method: chkType.Method,
|
|
|
|
Interval: chkType.Interval,
|
|
|
|
Timeout: chkType.Timeout,
|
|
|
|
Logger: a.logger,
|
2019-06-26 15:43:25 +00:00
|
|
|
OutputMaxSize: maxOutputSize,
|
2017-11-08 02:22:09 +00:00
|
|
|
TLSClientConfig: tlsClientConfig,
|
2015-01-09 22:43:24 +00:00
|
|
|
}
|
|
|
|
http.Start()
|
|
|
|
a.checkHTTPs[check.CheckID] = http
|
|
|
|
|
2017-07-12 14:01:42 +00:00
|
|
|
case chkType.IsTCP():
|
2015-07-23 11:45:08 +00:00
|
|
|
if existing, ok := a.checkTCPs[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
2017-07-12 14:01:42 +00:00
|
|
|
delete(a.checkTCPs, check.CheckID)
|
2015-07-23 11:45:08 +00:00
|
|
|
}
|
2017-10-25 09:18:07 +00:00
|
|
|
if chkType.Interval < checks.MinInterval {
|
2015-07-23 11:45:08 +00:00
|
|
|
a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
|
2017-10-25 09:18:07 +00:00
|
|
|
check.CheckID, checks.MinInterval))
|
|
|
|
chkType.Interval = checks.MinInterval
|
2015-07-23 11:45:08 +00:00
|
|
|
}
|
|
|
|
|
2017-10-25 09:18:07 +00:00
|
|
|
tcp := &checks.CheckTCP{
|
2017-08-28 12:17:13 +00:00
|
|
|
Notify: a.State,
|
2015-07-23 11:45:08 +00:00
|
|
|
CheckID: check.CheckID,
|
|
|
|
TCP: chkType.TCP,
|
|
|
|
Interval: chkType.Interval,
|
|
|
|
Timeout: chkType.Timeout,
|
|
|
|
Logger: a.logger,
|
|
|
|
}
|
|
|
|
tcp.Start()
|
|
|
|
a.checkTCPs[check.CheckID] = tcp
|
|
|
|
|
2017-12-27 04:35:22 +00:00
|
|
|
case chkType.IsGRPC():
|
|
|
|
if existing, ok := a.checkGRPCs[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
|
|
|
delete(a.checkGRPCs, check.CheckID)
|
|
|
|
}
|
|
|
|
if chkType.Interval < checks.MinInterval {
|
|
|
|
a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
|
|
|
|
check.CheckID, checks.MinInterval))
|
|
|
|
chkType.Interval = checks.MinInterval
|
|
|
|
}
|
|
|
|
|
|
|
|
var tlsClientConfig *tls.Config
|
2018-02-03 01:29:34 +00:00
|
|
|
if chkType.GRPCUseTLS {
|
2019-03-13 09:29:06 +00:00
|
|
|
tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
|
2017-12-27 04:35:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
grpc := &checks.CheckGRPC{
|
|
|
|
Notify: a.State,
|
|
|
|
CheckID: check.CheckID,
|
|
|
|
GRPC: chkType.GRPC,
|
|
|
|
Interval: chkType.Interval,
|
|
|
|
Timeout: chkType.Timeout,
|
|
|
|
Logger: a.logger,
|
|
|
|
TLSClientConfig: tlsClientConfig,
|
|
|
|
}
|
|
|
|
grpc.Start()
|
|
|
|
a.checkGRPCs[check.CheckID] = grpc
|
|
|
|
|
2017-07-12 14:01:42 +00:00
|
|
|
case chkType.IsDocker():
|
2015-10-22 22:29:13 +00:00
|
|
|
if existing, ok := a.checkDockers[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
2017-07-12 14:01:42 +00:00
|
|
|
delete(a.checkDockers, check.CheckID)
|
2015-10-22 22:29:13 +00:00
|
|
|
}
|
2017-10-25 09:18:07 +00:00
|
|
|
if chkType.Interval < checks.MinInterval {
|
2015-10-22 22:29:13 +00:00
|
|
|
a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
|
2017-10-25 09:18:07 +00:00
|
|
|
check.CheckID, checks.MinInterval))
|
|
|
|
chkType.Interval = checks.MinInterval
|
2015-10-22 22:29:13 +00:00
|
|
|
}
|
|
|
|
|
2017-07-12 14:01:42 +00:00
|
|
|
if a.dockerClient == nil {
|
2019-06-26 15:43:25 +00:00
|
|
|
dc, err := checks.NewDockerClient(os.Getenv("DOCKER_HOST"), int64(maxOutputSize))
|
2017-07-12 14:01:42 +00:00
|
|
|
if err != nil {
|
|
|
|
a.logger.Printf("[ERR] agent: error creating docker client: %s", err)
|
|
|
|
return err
|
|
|
|
}
|
2017-10-25 09:18:07 +00:00
|
|
|
a.logger.Printf("[DEBUG] agent: created docker client for %s", dc.Host())
|
2017-07-12 14:01:42 +00:00
|
|
|
a.dockerClient = dc
|
|
|
|
}
|
|
|
|
|
2017-10-25 09:18:07 +00:00
|
|
|
dockerCheck := &checks.CheckDocker{
|
2017-08-28 12:17:13 +00:00
|
|
|
Notify: a.State,
|
2015-10-22 22:29:13 +00:00
|
|
|
CheckID: check.CheckID,
|
2015-11-18 15:40:02 +00:00
|
|
|
DockerContainerID: chkType.DockerContainerID,
|
2015-10-22 22:29:13 +00:00
|
|
|
Shell: chkType.Shell,
|
2017-10-04 23:48:00 +00:00
|
|
|
ScriptArgs: chkType.ScriptArgs,
|
2015-10-22 22:29:13 +00:00
|
|
|
Interval: chkType.Interval,
|
|
|
|
Logger: a.logger,
|
2017-10-25 09:18:07 +00:00
|
|
|
Client: a.dockerClient,
|
2015-10-26 23:45:12 +00:00
|
|
|
}
|
2017-10-26 10:03:07 +00:00
|
|
|
if prev := a.checkDockers[check.CheckID]; prev != nil {
|
|
|
|
prev.Stop()
|
|
|
|
}
|
2015-10-22 22:29:13 +00:00
|
|
|
dockerCheck.Start()
|
|
|
|
a.checkDockers[check.CheckID] = dockerCheck
|
2017-07-12 14:01:42 +00:00
|
|
|
|
|
|
|
case chkType.IsMonitor():
|
2015-10-26 22:02:23 +00:00
|
|
|
if existing, ok := a.checkMonitors[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
2017-07-12 14:01:42 +00:00
|
|
|
delete(a.checkMonitors, check.CheckID)
|
2015-10-26 22:02:23 +00:00
|
|
|
}
|
2017-10-25 09:18:07 +00:00
|
|
|
if chkType.Interval < checks.MinInterval {
|
2017-10-04 23:48:00 +00:00
|
|
|
a.logger.Printf("[WARN] agent: check '%s' has interval below minimum of %v",
|
2017-10-25 09:18:07 +00:00
|
|
|
check.CheckID, checks.MinInterval)
|
|
|
|
chkType.Interval = checks.MinInterval
|
2015-10-26 22:02:23 +00:00
|
|
|
}
|
2017-10-25 09:18:07 +00:00
|
|
|
monitor := &checks.CheckMonitor{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: a.State,
|
|
|
|
CheckID: check.CheckID,
|
|
|
|
ScriptArgs: chkType.ScriptArgs,
|
|
|
|
Interval: chkType.Interval,
|
|
|
|
Timeout: chkType.Timeout,
|
|
|
|
Logger: a.logger,
|
|
|
|
OutputMaxSize: maxOutputSize,
|
2015-10-26 22:02:23 +00:00
|
|
|
}
|
|
|
|
monitor.Start()
|
|
|
|
a.checkMonitors[check.CheckID] = monitor
|
2017-07-12 14:01:42 +00:00
|
|
|
|
2018-06-30 13:38:56 +00:00
|
|
|
case chkType.IsAlias():
|
|
|
|
if existing, ok := a.checkAliases[check.CheckID]; ok {
|
|
|
|
existing.Stop()
|
|
|
|
delete(a.checkAliases, check.CheckID)
|
|
|
|
}
|
|
|
|
|
|
|
|
var rpcReq structs.NodeSpecificRequest
|
|
|
|
rpcReq.Datacenter = a.config.Datacenter
|
2018-07-12 17:17:53 +00:00
|
|
|
|
|
|
|
// The token to set is really important. The behavior below follows
|
|
|
|
// the same behavior as anti-entropy: we use the user-specified token
|
|
|
|
// if set (either on the service or check definition), otherwise
|
|
|
|
// we use the "UserToken" on the agent. This is tested.
|
|
|
|
rpcReq.Token = a.tokens.UserToken()
|
|
|
|
if token != "" {
|
|
|
|
rpcReq.Token = token
|
|
|
|
}
|
2018-06-30 13:38:56 +00:00
|
|
|
|
|
|
|
chkImpl := &checks.CheckAlias{
|
|
|
|
Notify: a.State,
|
|
|
|
RPC: a.delegate,
|
|
|
|
RPCReq: rpcReq,
|
|
|
|
CheckID: check.CheckID,
|
|
|
|
Node: chkType.AliasNode,
|
|
|
|
ServiceID: chkType.AliasService,
|
|
|
|
}
|
|
|
|
chkImpl.Start()
|
|
|
|
a.checkAliases[check.CheckID] = chkImpl
|
|
|
|
|
2017-07-12 14:01:42 +00:00
|
|
|
default:
|
2015-10-27 02:52:32 +00:00
|
|
|
return fmt.Errorf("Check type is not valid")
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2016-08-16 07:05:55 +00:00
|
|
|
|
|
|
|
if chkType.DeregisterCriticalServiceAfter > 0 {
|
|
|
|
timeout := chkType.DeregisterCriticalServiceAfter
|
|
|
|
if timeout < a.config.CheckDeregisterIntervalMin {
|
|
|
|
timeout = a.config.CheckDeregisterIntervalMin
|
|
|
|
a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has deregister interval below minimum of %v",
|
|
|
|
check.CheckID, a.config.CheckDeregisterIntervalMin))
|
|
|
|
}
|
|
|
|
a.checkReapAfter[check.CheckID] = timeout
|
|
|
|
} else {
|
|
|
|
delete(a.checkReapAfter, check.CheckID)
|
|
|
|
}
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2014-11-25 03:24:32 +00:00
|
|
|
return nil
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// RemoveCheck is used to remove a health check.
|
|
|
|
// The agent will make a best effort to ensure it is deregistered
|
2016-06-06 20:19:31 +00:00
|
|
|
func (a *Agent) RemoveCheck(checkID types.CheckID, persist bool) error {
|
2019-03-04 14:34:05 +00:00
|
|
|
a.stateLock.Lock()
|
|
|
|
defer a.stateLock.Unlock()
|
|
|
|
return a.removeCheckLocked(checkID, persist)
|
|
|
|
}
|
|
|
|
|
|
|
|
// removeCheckLocked is used to remove a health check.
|
|
|
|
// The agent will make a best effort to ensure it is deregistered
|
|
|
|
func (a *Agent) removeCheckLocked(checkID types.CheckID, persist bool) error {
|
2015-01-26 16:06:49 +00:00
|
|
|
// Validate CheckID
|
|
|
|
if checkID == "" {
|
|
|
|
return fmt.Errorf("CheckID missing")
|
|
|
|
}
|
|
|
|
|
2017-07-18 21:54:20 +00:00
|
|
|
a.cancelCheckMonitors(checkID)
|
2019-03-04 14:34:05 +00:00
|
|
|
a.State.RemoveCheck(checkID)
|
2017-07-18 21:54:20 +00:00
|
|
|
|
|
|
|
if persist {
|
|
|
|
if err := a.purgeCheck(checkID); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := a.purgeCheckState(checkID); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
a.logger.Printf("[DEBUG] agent: removed check %q", checkID)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-07-17 20:16:43 +00:00
|
|
|
// addProxyLocked adds a new local Connect Proxy instance to be managed by the agent.
|
|
|
|
//
|
|
|
|
// This assumes that the agent's proxyLock is already held
|
2018-04-16 15:00:20 +00:00
|
|
|
//
|
|
|
|
// It REQUIRES that the service that is being proxied is already present in the
|
|
|
|
// local state. Note that this is only used for agent-managed proxies so we can
|
|
|
|
// ensure that we always make this true. For externally managed and registered
|
|
|
|
// proxies we explicitly allow the proxy to be registered first to make
|
|
|
|
// bootstrap ordering of a new service simpler but the same is not true here
|
|
|
|
// since this is only ever called when setting up a _managed_ proxy which was
|
|
|
|
// registered as part of a service registration either from config or HTTP API
|
|
|
|
// call.
|
2018-05-14 20:55:24 +00:00
|
|
|
//
|
|
|
|
// The restoredProxyToken argument should only be used when restoring proxy
|
|
|
|
// definitions from disk; new proxies must leave it blank to get a new token
|
|
|
|
// assigned. We need to restore from disk to enable to continue authenticating
|
|
|
|
// running proxies that already had that credential injected.
|
2018-07-17 20:16:43 +00:00
|
|
|
func (a *Agent) addProxyLocked(proxy *structs.ConnectManagedProxy, persist, FromFile bool,
|
2018-10-11 12:22:11 +00:00
|
|
|
restoredProxyToken string, source configSource) error {
|
2018-04-16 15:00:20 +00:00
|
|
|
// Lookup the target service token in state if there is one.
|
|
|
|
token := a.State.ServiceToken(proxy.TargetServiceID)
|
|
|
|
|
2018-05-03 17:44:10 +00:00
|
|
|
// Copy the basic proxy structure so it isn't modified w/ defaults
|
|
|
|
proxyCopy := *proxy
|
|
|
|
proxy = &proxyCopy
|
|
|
|
if err := a.applyProxyDefaults(proxy); err != nil {
|
|
|
|
return err
|
2018-05-02 18:38:18 +00:00
|
|
|
}
|
2018-04-27 18:24:49 +00:00
|
|
|
|
2018-04-16 15:00:20 +00:00
|
|
|
// Add the proxy to local state first since we may need to assign a port which
|
|
|
|
// needs to be coordinate under state lock. AddProxy will generate the
|
|
|
|
// NodeService for the proxy populated with the allocated (or configured) port
|
|
|
|
// and an ID, but it doesn't add it to the agent directly since that could
|
|
|
|
// deadlock and we may need to coordinate adding it and persisting etc.
|
2018-05-14 20:55:24 +00:00
|
|
|
proxyState, err := a.State.AddProxy(proxy, token, restoredProxyToken)
|
2018-04-16 15:00:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-04-27 18:24:49 +00:00
|
|
|
proxyService := proxyState.Proxy.ProxyService
|
|
|
|
|
2018-06-19 11:11:42 +00:00
|
|
|
// Register proxy TCP check. The built in proxy doesn't listen publically
|
|
|
|
// until it's loaded certs so this ensures we won't route traffic until it's
|
|
|
|
// ready.
|
|
|
|
proxyCfg, err := a.applyProxyConfigDefaults(proxyState.Proxy)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-07-12 11:57:10 +00:00
|
|
|
chkAddr := a.resolveProxyCheckAddress(proxyCfg)
|
|
|
|
chkTypes := []*structs.CheckType{}
|
|
|
|
if chkAddr != "" {
|
2019-06-04 14:02:38 +00:00
|
|
|
bindPort, ok := proxyCfg["bind_port"].(int)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("Cannot convert bind_port=%v to an int for creating TCP Check for address %s", proxyCfg["bind_port"], chkAddr)
|
|
|
|
}
|
2018-07-12 11:57:10 +00:00
|
|
|
chkTypes = []*structs.CheckType{
|
|
|
|
&structs.CheckType{
|
2019-06-04 14:02:38 +00:00
|
|
|
Name: "Connect Proxy Listening",
|
|
|
|
TCP: ipaddr.FormatAddressPort(chkAddr, bindPort),
|
2018-07-12 11:57:10 +00:00
|
|
|
Interval: 10 * time.Second,
|
|
|
|
},
|
|
|
|
}
|
2018-06-19 11:11:42 +00:00
|
|
|
}
|
|
|
|
|
2019-03-04 14:34:05 +00:00
|
|
|
err = a.addServiceLocked(proxyService, chkTypes, persist, token, source)
|
2018-04-16 15:00:20 +00:00
|
|
|
if err != nil {
|
|
|
|
// Remove the state too
|
|
|
|
a.State.RemoveProxy(proxyService.ID)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-05-14 20:55:24 +00:00
|
|
|
// Persist the proxy
|
2018-06-06 20:04:19 +00:00
|
|
|
if persist && a.config.DataDir != "" {
|
2018-07-17 20:16:43 +00:00
|
|
|
return a.persistProxy(proxyState, FromFile)
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
2018-04-16 15:00:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-10-11 09:44:42 +00:00
|
|
|
// AddProxy adds a new local Connect Proxy instance to be managed by the agent.
|
2018-07-17 20:16:43 +00:00
|
|
|
//
|
|
|
|
// It REQUIRES that the service that is being proxied is already present in the
|
|
|
|
// local state. Note that this is only used for agent-managed proxies so we can
|
|
|
|
// ensure that we always make this true. For externally managed and registered
|
|
|
|
// proxies we explicitly allow the proxy to be registered first to make
|
|
|
|
// bootstrap ordering of a new service simpler but the same is not true here
|
|
|
|
// since this is only ever called when setting up a _managed_ proxy which was
|
|
|
|
// registered as part of a service registration either from config or HTTP API
|
|
|
|
// call.
|
|
|
|
//
|
|
|
|
// The restoredProxyToken argument should only be used when restoring proxy
|
|
|
|
// definitions from disk; new proxies must leave it blank to get a new token
|
|
|
|
// assigned. We need to restore from disk to enable to continue authenticating
|
|
|
|
// running proxies that already had that credential injected.
|
|
|
|
func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist, FromFile bool,
|
2018-10-11 12:22:11 +00:00
|
|
|
restoredProxyToken string, source configSource) error {
|
2019-03-04 14:34:05 +00:00
|
|
|
a.stateLock.Lock()
|
|
|
|
defer a.stateLock.Unlock()
|
2018-10-11 12:22:11 +00:00
|
|
|
return a.addProxyLocked(proxy, persist, FromFile, restoredProxyToken, source)
|
2018-07-17 20:16:43 +00:00
|
|
|
}
|
|
|
|
|
2018-07-12 11:57:10 +00:00
|
|
|
// resolveProxyCheckAddress returns the best address to use for a TCP check of
|
|
|
|
// the proxy's public listener. It expects the input to already have default
|
|
|
|
// values populated by applyProxyConfigDefaults. It may return an empty string
|
|
|
|
// indicating that the TCP check should not be created at all.
|
|
|
|
//
|
|
|
|
// By default this uses the proxy's bind address which in turn defaults to the
|
|
|
|
// agent's bind address. If the proxy bind address ends up being 0.0.0.0 we have
|
|
|
|
// to assume the agent can dial it over loopback which is usually true.
|
|
|
|
//
|
|
|
|
// In some topologies such as proxy being in a different container, the IP the
|
|
|
|
// agent used to dial proxy over a local bridge might not be the same as the
|
|
|
|
// container's public routable IP address so we allow a manual override of the
|
|
|
|
// check address in config "tcp_check_address" too.
|
|
|
|
//
|
|
|
|
// Finally the TCP check can be disabled by another manual override
|
|
|
|
// "disable_tcp_check" in cases where the agent will never be able to dial the
|
|
|
|
// proxy directly for some reason.
|
|
|
|
func (a *Agent) resolveProxyCheckAddress(proxyCfg map[string]interface{}) string {
|
|
|
|
// If user disabled the check return empty string
|
|
|
|
if disable, ok := proxyCfg["disable_tcp_check"].(bool); ok && disable {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
// If user specified a custom one, use that
|
|
|
|
if chkAddr, ok := proxyCfg["tcp_check_address"].(string); ok && chkAddr != "" {
|
|
|
|
return chkAddr
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have a bind address and its diallable, use that
|
|
|
|
if bindAddr, ok := proxyCfg["bind_address"].(string); ok &&
|
|
|
|
bindAddr != "" && bindAddr != "0.0.0.0" && bindAddr != "[::]" {
|
|
|
|
return bindAddr
|
|
|
|
}
|
|
|
|
|
|
|
|
// Default to localhost
|
|
|
|
return "127.0.0.1"
|
|
|
|
}
|
|
|
|
|
2018-06-19 11:11:42 +00:00
|
|
|
// applyProxyConfigDefaults takes a *structs.ConnectManagedProxy and returns
|
|
|
|
// it's Config map merged with any defaults from the Agent's config. It would be
|
|
|
|
// nicer if this were defined as a method on structs.ConnectManagedProxy but we
|
|
|
|
// can't do that because ot the import cycle it causes with agent/config.
|
|
|
|
func (a *Agent) applyProxyConfigDefaults(p *structs.ConnectManagedProxy) (map[string]interface{}, error) {
|
|
|
|
if p == nil || p.ProxyService == nil {
|
|
|
|
// Should never happen but protect from panic
|
|
|
|
return nil, fmt.Errorf("invalid proxy state")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the target service
|
|
|
|
target := a.State.Service(p.TargetServiceID)
|
|
|
|
if target == nil {
|
|
|
|
// Can happen during deregistration race between proxy and scheduler.
|
|
|
|
return nil, fmt.Errorf("unknown target service ID: %s", p.TargetServiceID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Merge globals defaults
|
|
|
|
config := make(map[string]interface{})
|
|
|
|
for k, v := range a.config.ConnectProxyDefaultConfig {
|
|
|
|
if _, ok := config[k]; !ok {
|
|
|
|
config[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy config from the proxy
|
|
|
|
for k, v := range p.Config {
|
|
|
|
config[k] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set defaults for anything that is still not specified but required.
|
|
|
|
// Note that these are not included in the content hash. Since we expect
|
|
|
|
// them to be static in general but some like the default target service
|
|
|
|
// port might not be. In that edge case services can set that explicitly
|
|
|
|
// when they re-register which will be caught though.
|
|
|
|
if _, ok := config["bind_port"]; !ok {
|
|
|
|
config["bind_port"] = p.ProxyService.Port
|
|
|
|
}
|
|
|
|
if _, ok := config["bind_address"]; !ok {
|
|
|
|
// Default to binding to the same address the agent is configured to
|
|
|
|
// bind to.
|
|
|
|
config["bind_address"] = a.config.BindAddr.String()
|
|
|
|
}
|
|
|
|
if _, ok := config["local_service_address"]; !ok {
|
|
|
|
// Default to localhost and the port the service registered with
|
|
|
|
config["local_service_address"] = fmt.Sprintf("127.0.0.1:%d", target.Port)
|
|
|
|
}
|
2018-06-22 06:03:31 +00:00
|
|
|
|
|
|
|
// Basic type conversions for expected types.
|
|
|
|
if raw, ok := config["bind_port"]; ok {
|
|
|
|
switch v := raw.(type) {
|
|
|
|
case float64:
|
|
|
|
// Common since HCL/JSON parse as float64
|
|
|
|
config["bind_port"] = int(v)
|
|
|
|
|
|
|
|
// NOTE(mitchellh): No default case since errors and validation
|
|
|
|
// are handled by the ServiceDefinition.Validate function.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-19 11:11:42 +00:00
|
|
|
return config, nil
|
|
|
|
}
|
|
|
|
|
2018-05-03 17:44:10 +00:00
|
|
|
// applyProxyDefaults modifies the given proxy by applying any configured
|
|
|
|
// defaults, such as the default execution mode, command, etc.
|
|
|
|
func (a *Agent) applyProxyDefaults(proxy *structs.ConnectManagedProxy) error {
|
|
|
|
// Set the default exec mode
|
|
|
|
if proxy.ExecMode == structs.ProxyExecModeUnspecified {
|
|
|
|
mode, err := structs.NewProxyExecMode(a.config.ConnectProxyDefaultExecMode)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
proxy.ExecMode = mode
|
|
|
|
}
|
|
|
|
if proxy.ExecMode == structs.ProxyExecModeUnspecified {
|
|
|
|
proxy.ExecMode = structs.ProxyExecModeDaemon
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the default command to the globally configured default
|
|
|
|
if len(proxy.Command) == 0 {
|
|
|
|
switch proxy.ExecMode {
|
|
|
|
case structs.ProxyExecModeDaemon:
|
|
|
|
proxy.Command = a.config.ConnectProxyDefaultDaemonCommand
|
|
|
|
|
|
|
|
case structs.ProxyExecModeScript:
|
|
|
|
proxy.Command = a.config.ConnectProxyDefaultScriptCommand
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there is no globally configured default we need to get the
|
|
|
|
// default command so we can do "consul connect proxy"
|
|
|
|
if len(proxy.Command) == 0 {
|
2018-06-15 20:04:04 +00:00
|
|
|
command, err := defaultProxyCommand(a.config)
|
2018-05-03 17:44:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
proxy.Command = command
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-07-17 20:16:43 +00:00
|
|
|
// removeProxyLocked stops and removes a local proxy instance.
|
|
|
|
//
|
|
|
|
// It is assumed that this function is called while holding the proxyLock already
|
|
|
|
func (a *Agent) removeProxyLocked(proxyID string, persist bool) error {
|
2018-04-16 15:00:20 +00:00
|
|
|
// Validate proxyID
|
|
|
|
if proxyID == "" {
|
|
|
|
return fmt.Errorf("proxyID missing")
|
|
|
|
}
|
|
|
|
|
2018-04-27 18:24:49 +00:00
|
|
|
// Remove the proxy from the local state
|
2018-06-13 06:21:50 +00:00
|
|
|
p, err := a.State.RemoveProxy(proxyID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the proxy service as well. The proxy ID is also the ID
|
|
|
|
// of the servie, but we might as well use the service pointer.
|
2019-03-04 14:34:05 +00:00
|
|
|
if err := a.removeServiceLocked(p.Proxy.ProxyService.ID, persist); err != nil {
|
2018-04-16 15:00:20 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-06-06 20:04:19 +00:00
|
|
|
if persist && a.config.DataDir != "" {
|
2018-05-14 20:55:24 +00:00
|
|
|
return a.purgeProxy(proxyID)
|
|
|
|
}
|
2018-04-16 15:00:20 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-07-17 20:16:43 +00:00
|
|
|
// RemoveProxy stops and removes a local proxy instance.
|
|
|
|
func (a *Agent) RemoveProxy(proxyID string, persist bool) error {
|
2019-03-04 14:34:05 +00:00
|
|
|
a.stateLock.Lock()
|
|
|
|
defer a.stateLock.Unlock()
|
2018-07-17 20:16:43 +00:00
|
|
|
return a.removeProxyLocked(proxyID, persist)
|
|
|
|
}
|
|
|
|
|
2018-05-07 04:46:22 +00:00
|
|
|
// verifyProxyToken takes a token and attempts to verify it against the
|
2018-05-10 16:04:33 +00:00
|
|
|
// targetService name. If targetProxy is specified, then the local proxy token
|
|
|
|
// must exactly match the given proxy ID. cert, config, etc.).
|
2018-05-07 04:02:44 +00:00
|
|
|
//
|
2018-05-10 16:04:33 +00:00
|
|
|
// The given token may be a local-only proxy token or it may be an ACL token. We
|
|
|
|
// will attempt to verify the local proxy token first.
|
|
|
|
//
|
2018-06-18 19:37:00 +00:00
|
|
|
// The effective ACL token is returned along with a boolean which is true if the
|
|
|
|
// match was against a proxy token rather than an ACL token, and any error. In
|
|
|
|
// the case the token matches a proxy token, then the ACL token used to register
|
|
|
|
// that proxy's target service is returned for use in any RPC calls the proxy
|
|
|
|
// needs to make on behalf of that service. If the token was an ACL token
|
|
|
|
// already then it is always returned. Provided error is nil, a valid ACL token
|
|
|
|
// is always returned.
|
|
|
|
func (a *Agent) verifyProxyToken(token, targetService,
|
|
|
|
targetProxy string) (string, bool, error) {
|
2018-05-07 04:46:22 +00:00
|
|
|
// If we specify a target proxy, we look up that proxy directly. Otherwise,
|
|
|
|
// we resolve with any proxy we can find.
|
|
|
|
var proxy *local.ManagedProxy
|
|
|
|
if targetProxy != "" {
|
|
|
|
proxy = a.State.Proxy(targetProxy)
|
|
|
|
if proxy == nil {
|
2018-06-18 19:37:00 +00:00
|
|
|
return "", false, fmt.Errorf("unknown proxy service ID: %q", targetProxy)
|
2018-05-07 04:46:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// If the token DOESN'T match, then we reset the proxy which will
|
|
|
|
// cause the logic below to fall back to normal ACLs. Otherwise,
|
|
|
|
// we keep the proxy set because we also have to verify that the
|
|
|
|
// target service matches on the proxy.
|
|
|
|
if token != proxy.ProxyToken {
|
|
|
|
proxy = nil
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
proxy = a.resolveProxyToken(token)
|
2018-05-07 04:02:44 +00:00
|
|
|
}
|
|
|
|
|
2018-05-07 04:46:22 +00:00
|
|
|
// The existence of a token isn't enough, we also need to verify
|
|
|
|
// that the service name of the matching proxy matches our target
|
|
|
|
// service.
|
|
|
|
if proxy != nil {
|
2018-05-19 06:27:02 +00:00
|
|
|
// Get the target service since we only have the name. The nil
|
|
|
|
// check below should never be true since a proxy token always
|
|
|
|
// represents the existence of a local service.
|
|
|
|
target := a.State.Service(proxy.Proxy.TargetServiceID)
|
|
|
|
if target == nil {
|
2018-06-18 19:37:00 +00:00
|
|
|
return "", false, fmt.Errorf("proxy target service not found: %q",
|
2018-05-19 06:27:02 +00:00
|
|
|
proxy.Proxy.TargetServiceID)
|
|
|
|
}
|
|
|
|
|
|
|
|
if target.Service != targetService {
|
2018-06-18 19:37:00 +00:00
|
|
|
return "", false, acl.ErrPermissionDenied
|
2018-05-07 04:46:22 +00:00
|
|
|
}
|
|
|
|
|
2018-05-10 16:04:33 +00:00
|
|
|
// Resolve the actual ACL token used to register the proxy/service and
|
|
|
|
// return that for use in RPC calls.
|
2018-06-18 19:37:00 +00:00
|
|
|
return a.State.ServiceToken(proxy.Proxy.TargetServiceID), true, nil
|
2018-05-07 04:46:22 +00:00
|
|
|
}
|
|
|
|
|
2018-05-07 04:02:44 +00:00
|
|
|
// Doesn't match, we have to do a full token resolution. The required
|
2018-06-18 19:37:00 +00:00
|
|
|
// permission for any proxy-related endpoint is service:write, since
|
2018-05-07 04:02:44 +00:00
|
|
|
// to register a proxy you require that permission and sensitive data
|
|
|
|
// is usually present in the configuration.
|
|
|
|
rule, err := a.resolveToken(token)
|
|
|
|
if err != nil {
|
2018-06-18 19:37:00 +00:00
|
|
|
return "", false, err
|
2018-05-07 04:02:44 +00:00
|
|
|
}
|
2018-05-19 06:27:02 +00:00
|
|
|
if rule != nil && !rule.ServiceWrite(targetService, nil) {
|
2018-06-18 19:37:00 +00:00
|
|
|
return "", false, acl.ErrPermissionDenied
|
2018-05-07 04:02:44 +00:00
|
|
|
}
|
|
|
|
|
2018-06-18 19:37:00 +00:00
|
|
|
return token, false, nil
|
2018-05-07 04:02:44 +00:00
|
|
|
}
|
|
|
|
|
2017-07-18 21:54:20 +00:00
|
|
|
func (a *Agent) cancelCheckMonitors(checkID types.CheckID) {
|
2014-01-30 21:39:02 +00:00
|
|
|
// Stop any monitors
|
2016-08-16 07:05:55 +00:00
|
|
|
delete(a.checkReapAfter, checkID)
|
2014-01-30 21:39:02 +00:00
|
|
|
if check, ok := a.checkMonitors[checkID]; ok {
|
|
|
|
check.Stop()
|
|
|
|
delete(a.checkMonitors, checkID)
|
|
|
|
}
|
2015-01-12 22:34:39 +00:00
|
|
|
if check, ok := a.checkHTTPs[checkID]; ok {
|
|
|
|
check.Stop()
|
|
|
|
delete(a.checkHTTPs, checkID)
|
|
|
|
}
|
2015-07-23 11:45:08 +00:00
|
|
|
if check, ok := a.checkTCPs[checkID]; ok {
|
|
|
|
check.Stop()
|
|
|
|
delete(a.checkTCPs, checkID)
|
|
|
|
}
|
2017-12-27 04:35:22 +00:00
|
|
|
if check, ok := a.checkGRPCs[checkID]; ok {
|
|
|
|
check.Stop()
|
|
|
|
delete(a.checkGRPCs, checkID)
|
|
|
|
}
|
2014-01-30 21:39:02 +00:00
|
|
|
if check, ok := a.checkTTLs[checkID]; ok {
|
|
|
|
check.Stop()
|
|
|
|
delete(a.checkTTLs, checkID)
|
|
|
|
}
|
2017-07-18 18:50:37 +00:00
|
|
|
if check, ok := a.checkDockers[checkID]; ok {
|
|
|
|
check.Stop()
|
|
|
|
delete(a.checkDockers, checkID)
|
|
|
|
}
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// updateTTLCheck is used to update the status of a TTL check via the Agent API.
|
|
|
|
func (a *Agent) updateTTLCheck(checkID types.CheckID, status, output string) error {
|
2019-03-04 14:34:05 +00:00
|
|
|
a.stateLock.Lock()
|
|
|
|
defer a.stateLock.Unlock()
|
2014-01-30 21:39:02 +00:00
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// Grab the TTL check.
|
2014-01-30 21:39:02 +00:00
|
|
|
check, ok := a.checkTTLs[checkID]
|
|
|
|
if !ok {
|
2016-06-20 22:25:21 +00:00
|
|
|
return fmt.Errorf("CheckID %q does not have associated TTL", checkID)
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// Set the status through CheckTTL to reset the TTL.
|
2019-06-26 15:43:25 +00:00
|
|
|
outputTruncated := check.SetStatus(status, output)
|
2015-06-05 23:17:07 +00:00
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// We don't write any files in dev mode so bail here.
|
2018-06-06 20:04:19 +00:00
|
|
|
if a.config.DataDir == "" {
|
2015-11-29 04:40:05 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// Persist the state so the TTL check can come up in a good state after
|
|
|
|
// an agent restart, especially with long TTL values.
|
2019-06-26 15:43:25 +00:00
|
|
|
if err := a.persistCheckState(check, status, outputTruncated); err != nil {
|
2015-06-05 23:17:07 +00:00
|
|
|
return fmt.Errorf("failed persisting state for check %q: %s", checkID, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// persistCheckState is used to record the check status into the data dir.
|
|
|
|
// This allows the state to be restored on a later agent start. Currently
|
|
|
|
// only useful for TTL based checks.
|
2017-10-25 09:18:07 +00:00
|
|
|
func (a *Agent) persistCheckState(check *checks.CheckTTL, status, output string) error {
|
2015-06-05 23:17:07 +00:00
|
|
|
// Create the persisted state
|
|
|
|
state := persistedCheckState{
|
|
|
|
CheckID: check.CheckID,
|
|
|
|
Status: status,
|
|
|
|
Output: output,
|
|
|
|
Expires: time.Now().Add(check.TTL).Unix(),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Encode the state
|
|
|
|
buf, err := json.Marshal(state)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the state dir if it doesn't exist
|
|
|
|
dir := filepath.Join(a.config.DataDir, checkStateDir)
|
|
|
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
|
|
|
return fmt.Errorf("failed creating check state dir %q: %s", dir, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write the state to the file
|
2016-06-06 08:53:30 +00:00
|
|
|
file := filepath.Join(dir, checkIDHash(check.CheckID))
|
2016-11-07 18:51:03 +00:00
|
|
|
|
|
|
|
// Create temp file in same dir, to make more likely atomic
|
2016-08-03 15:32:21 +00:00
|
|
|
tempFile := file + ".tmp"
|
|
|
|
|
2016-11-07 20:24:31 +00:00
|
|
|
// persistCheckState is called frequently, so don't use writeFileAtomic to avoid calling fsync here
|
2016-08-03 15:32:21 +00:00
|
|
|
if err := ioutil.WriteFile(tempFile, buf, 0600); err != nil {
|
|
|
|
return fmt.Errorf("failed writing temp file %q: %s", tempFile, err)
|
|
|
|
}
|
|
|
|
if err := os.Rename(tempFile, file); err != nil {
|
|
|
|
return fmt.Errorf("failed to rename temp file from %q to %q: %s", tempFile, file, err)
|
2015-06-05 23:17:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-06-08 16:35:10 +00:00
|
|
|
// loadCheckState is used to restore the persisted state of a check.
|
|
|
|
func (a *Agent) loadCheckState(check *structs.HealthCheck) error {
|
2015-06-05 23:17:07 +00:00
|
|
|
// Try to read the persisted state for this check
|
2016-06-06 08:53:30 +00:00
|
|
|
file := filepath.Join(a.config.DataDir, checkStateDir, checkIDHash(check.CheckID))
|
2015-06-05 23:17:07 +00:00
|
|
|
buf, err := ioutil.ReadFile(file)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return fmt.Errorf("failed reading file %q: %s", file, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Decode the state data
|
|
|
|
var p persistedCheckState
|
|
|
|
if err := json.Unmarshal(buf, &p); err != nil {
|
2017-06-08 07:50:47 +00:00
|
|
|
a.logger.Printf("[ERR] agent: failed decoding check state: %s", err)
|
2016-11-07 18:51:03 +00:00
|
|
|
return a.purgeCheckState(check.CheckID)
|
2015-06-05 23:17:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the state has expired
|
2015-06-05 23:45:05 +00:00
|
|
|
if time.Now().Unix() >= p.Expires {
|
2015-06-05 23:17:07 +00:00
|
|
|
a.logger.Printf("[DEBUG] agent: check state expired for %q, not restoring", check.CheckID)
|
2015-06-05 23:59:41 +00:00
|
|
|
return a.purgeCheckState(check.CheckID)
|
2015-06-05 23:17:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Restore the fields from the state
|
|
|
|
check.Output = p.Output
|
|
|
|
check.Status = p.Status
|
2014-01-30 21:39:02 +00:00
|
|
|
return nil
|
|
|
|
}
|
2014-02-24 00:42:39 +00:00
|
|
|
|
2015-06-05 23:57:14 +00:00
|
|
|
// purgeCheckState is used to purge the state of a check from the data dir
|
2016-06-06 20:19:31 +00:00
|
|
|
func (a *Agent) purgeCheckState(checkID types.CheckID) error {
|
2016-06-06 08:53:30 +00:00
|
|
|
file := filepath.Join(a.config.DataDir, checkStateDir, checkIDHash(checkID))
|
2015-06-05 23:57:14 +00:00
|
|
|
err := os.Remove(file)
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-02 09:35:59 +00:00
|
|
|
func (a *Agent) GossipEncrypted() bool {
|
|
|
|
return a.delegate.Encrypted()
|
|
|
|
}
|
|
|
|
|
2014-02-24 00:42:39 +00:00
|
|
|
// Stats is used to get various debugging state from the sub-systems
|
|
|
|
func (a *Agent) Stats() map[string]map[string]string {
|
2017-05-15 14:05:17 +00:00
|
|
|
stats := a.delegate.Stats()
|
2014-02-24 00:42:39 +00:00
|
|
|
stats["agent"] = map[string]string{
|
2017-08-28 12:17:12 +00:00
|
|
|
"check_monitors": strconv.Itoa(len(a.checkMonitors)),
|
|
|
|
"check_ttls": strconv.Itoa(len(a.checkTTLs)),
|
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
for k, v := range a.State.Stats() {
|
2017-08-28 12:17:12 +00:00
|
|
|
stats["agent"][k] = v
|
2014-02-24 00:42:39 +00:00
|
|
|
}
|
2014-06-06 21:40:22 +00:00
|
|
|
|
|
|
|
revision := a.config.Revision
|
|
|
|
if len(revision) > 8 {
|
|
|
|
revision = revision[:8]
|
|
|
|
}
|
|
|
|
stats["build"] = map[string]string{
|
|
|
|
"revision": revision,
|
|
|
|
"version": a.config.Version,
|
|
|
|
"prerelease": a.config.VersionPrerelease,
|
|
|
|
}
|
2014-02-24 00:42:39 +00:00
|
|
|
return stats
|
|
|
|
}
|
2014-05-06 03:29:50 +00:00
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
// storePid is used to write out our PID to a file if necessary
|
2014-05-06 16:57:53 +00:00
|
|
|
func (a *Agent) storePid() error {
|
2014-05-06 19:43:33 +00:00
|
|
|
// Quit fast if no pidfile
|
2014-05-06 03:29:50 +00:00
|
|
|
pidPath := a.config.PidFile
|
2014-05-06 19:43:33 +00:00
|
|
|
if pidPath == "" {
|
|
|
|
return nil
|
|
|
|
}
|
2014-05-06 03:29:50 +00:00
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
// Open the PID file
|
|
|
|
pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Could not open pid file: %v", err)
|
2014-05-06 03:29:50 +00:00
|
|
|
}
|
2014-05-06 19:43:33 +00:00
|
|
|
defer pidFile.Close()
|
2014-05-06 16:57:53 +00:00
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
// Write out the PID
|
|
|
|
pid := os.Getpid()
|
|
|
|
_, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Could not write to pid file: %s", err)
|
|
|
|
}
|
2014-05-06 16:57:53 +00:00
|
|
|
return nil
|
2014-05-06 03:29:50 +00:00
|
|
|
}
|
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
// deletePid is used to delete our PID on exit
|
2014-05-06 16:57:53 +00:00
|
|
|
func (a *Agent) deletePid() error {
|
2014-05-06 19:43:33 +00:00
|
|
|
// Quit fast if no pidfile
|
2014-05-06 03:29:50 +00:00
|
|
|
pidPath := a.config.PidFile
|
2014-05-06 19:43:33 +00:00
|
|
|
if pidPath == "" {
|
|
|
|
return nil
|
|
|
|
}
|
2014-05-06 03:29:50 +00:00
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
stat, err := os.Stat(pidPath)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Could not remove pid file: %s", err)
|
|
|
|
}
|
2014-05-06 03:29:50 +00:00
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
if stat.IsDir() {
|
|
|
|
return fmt.Errorf("Specified pid file path is directory")
|
2014-05-06 03:29:50 +00:00
|
|
|
}
|
2014-05-06 16:57:53 +00:00
|
|
|
|
2014-05-06 19:43:33 +00:00
|
|
|
err = os.Remove(pidPath)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Could not remove pid file: %s", err)
|
|
|
|
}
|
2014-05-06 16:57:53 +00:00
|
|
|
return nil
|
2014-05-06 03:29:50 +00:00
|
|
|
}
|
2014-11-26 07:58:02 +00:00
|
|
|
|
2015-01-08 02:05:46 +00:00
|
|
|
// loadServices will load service definitions from configuration and persisted
// definitions on disk, and load them into the local agent. Config-sourced
// definitions win: a persisted file whose service ID is already registered
// from config is purged rather than restored.
//
// NOTE(review): this calls addServiceLocked/removeServiceLocked-style
// helpers — presumably the caller holds a.stateLock; confirm at call sites.
func (a *Agent) loadServices(conf *config.RuntimeConfig) error {
	// Register the services from config
	for _, service := range conf.Services {
		ns := service.NodeService()
		chkTypes, err := service.CheckTypes()
		if err != nil {
			return fmt.Errorf("Failed to validate checks for service %q: %v", service.Name, err)
		}

		// Grab and validate sidecar if there is one too
		sidecar, sidecarChecks, sidecarToken, err := a.sidecarServiceFromNodeService(ns, service.Token)
		if err != nil {
			return fmt.Errorf("Failed to validate sidecar for service %q: %v", service.Name, err)
		}

		// Remove sidecar from NodeService now it's done it's job it's just a config
		// syntax sugar and shouldn't be persisted in local or server state.
		ns.Connect.SidecarService = nil

		if err := a.addServiceLocked(ns, chkTypes, false, service.Token, ConfigSourceLocal); err != nil {
			return fmt.Errorf("Failed to register service %q: %v", service.Name, err)
		}

		// If there is a sidecar service, register that too.
		if sidecar != nil {
			if err := a.addServiceLocked(sidecar, sidecarChecks, false, sidecarToken, ConfigSourceLocal); err != nil {
				return fmt.Errorf("Failed to register sidecar for service %q: %v", service.Name, err)
			}
		}
	}

	// Load any persisted services. A missing directory simply means nothing
	// was ever persisted, which is not an error.
	svcDir := filepath.Join(a.config.DataDir, servicesDir)
	files, err := ioutil.ReadDir(svcDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("Failed reading services dir %q: %s", svcDir, err)
	}
	for _, fi := range files {
		// Skip all dirs
		if fi.IsDir() {
			continue
		}

		// Skip all partially written temporary files
		if strings.HasSuffix(fi.Name(), "tmp") {
			a.logger.Printf("[WARN] agent: Ignoring temporary service file %v", fi.Name())
			continue
		}

		// Open the file for reading
		file := filepath.Join(svcDir, fi.Name())
		fh, err := os.Open(file)
		if err != nil {
			return fmt.Errorf("failed opening service file %q: %s", file, err)
		}

		// Read the contents into a buffer. Close eagerly (not deferred) so
		// handles are not held for the remainder of the loop.
		buf, err := ioutil.ReadAll(fh)
		fh.Close()
		if err != nil {
			return fmt.Errorf("failed reading service file %q: %s", file, err)
		}

		// Try decoding the service definition
		var p persistedService
		if err := json.Unmarshal(buf, &p); err != nil {
			// Backwards-compatibility for pre-0.5.1 persisted services
			if err := json.Unmarshal(buf, &p.Service); err != nil {
				// An undecodable file is logged and skipped rather than
				// failing the whole load.
				a.logger.Printf("[ERR] agent: Failed decoding service file %q: %s", file, err)
				continue
			}
		}
		serviceID := p.Service.ID

		if a.State.Service(serviceID) != nil {
			// Purge previously persisted service. This allows config to be
			// preferred over services persisted from the API.
			a.logger.Printf("[DEBUG] agent: service %q exists, not restoring from %q",
				serviceID, file)
			if err := a.purgeService(serviceID); err != nil {
				return fmt.Errorf("failed purging service %q: %s", serviceID, err)
			}
		} else {
			a.logger.Printf("[DEBUG] agent: restored service definition %q from %q",
				serviceID, file)
			if err := a.addServiceLocked(p.Service, nil, false, p.Token, ConfigSourceLocal); err != nil {
				return fmt.Errorf("failed adding service %q: %s", serviceID, err)
			}
		}
	}

	return nil
}
|
|
|
|
|
2017-08-30 10:25:49 +00:00
|
|
|
// unloadServices will deregister all services.
|
2015-01-08 02:05:46 +00:00
|
|
|
func (a *Agent) unloadServices() error {
|
2017-08-28 12:17:13 +00:00
|
|
|
for id := range a.State.Services() {
|
2019-03-04 14:34:05 +00:00
|
|
|
if err := a.removeServiceLocked(id, false); err != nil {
|
2017-08-28 12:17:11 +00:00
|
|
|
return fmt.Errorf("Failed deregistering service '%s': %v", id, err)
|
2014-11-26 07:58:02 +00:00
|
|
|
}
|
|
|
|
}
|
2015-01-08 02:05:46 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// loadChecks loads check definitions and/or persisted check definitions from
// disk and re-registers them with the local agent. snap is a snapshot of the
// pre-reload check states (see snapshotCheckState) used to carry Output and
// Status across the reload. Config-sourced checks win over persisted ones.
//
// NOTE(review): uses addCheckLocked — presumably the caller holds
// a.stateLock; confirm at call sites.
func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[types.CheckID]*structs.HealthCheck) error {
	// Register the checks from config
	for _, check := range conf.Checks {
		health := check.HealthCheck(conf.NodeName)

		// Restore the fields from the snapshot.
		if prev, ok := snap[health.CheckID]; ok {
			health.Output = prev.Output
			health.Status = prev.Status
		}

		chkType := check.CheckType()
		if err := a.addCheckLocked(health, chkType, false, check.Token, ConfigSourceLocal); err != nil {
			return fmt.Errorf("Failed to register check '%s': %v %v", check.Name, err, check)
		}
	}

	// Load any persisted checks. A missing directory means nothing was ever
	// persisted, which is not an error.
	checkDir := filepath.Join(a.config.DataDir, checksDir)
	files, err := ioutil.ReadDir(checkDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("Failed reading checks dir %q: %s", checkDir, err)
	}
	for _, fi := range files {
		// Ignore dirs - we only care about the check definition files
		if fi.IsDir() {
			continue
		}

		// Open the file for reading
		file := filepath.Join(checkDir, fi.Name())
		fh, err := os.Open(file)
		if err != nil {
			return fmt.Errorf("Failed opening check file %q: %s", file, err)
		}

		// Read the contents into a buffer. Close eagerly (not deferred) so
		// handles are not held for the remainder of the loop.
		buf, err := ioutil.ReadAll(fh)
		fh.Close()
		if err != nil {
			return fmt.Errorf("failed reading check file %q: %s", file, err)
		}

		// Decode the check; an undecodable file is logged and skipped
		// rather than failing the whole load.
		var p persistedCheck
		if err := json.Unmarshal(buf, &p); err != nil {
			a.logger.Printf("[ERR] agent: Failed decoding check file %q: %s", file, err)
			continue
		}
		checkID := p.Check.CheckID

		if a.State.Check(checkID) != nil {
			// Purge previously persisted check. This allows config to be
			// preferred over persisted checks from the API.
			a.logger.Printf("[DEBUG] agent: check %q exists, not restoring from %q",
				checkID, file)
			if err := a.purgeCheck(checkID); err != nil {
				return fmt.Errorf("Failed purging check %q: %s", checkID, err)
			}
		} else {
			// Default check to critical to avoid placing potentially unhealthy
			// services into the active pool
			p.Check.Status = api.HealthCritical

			// Restore the fields from the snapshot (overrides the critical
			// default when the check existed before the reload).
			if prev, ok := snap[p.Check.CheckID]; ok {
				p.Check.Output = prev.Output
				p.Check.Status = prev.Status
			}

			if err := a.addCheckLocked(p.Check, p.ChkType, false, p.Token, ConfigSourceLocal); err != nil {
				// Purge the check if it is unable to be restored.
				a.logger.Printf("[WARN] agent: Failed to restore check %q: %s",
					checkID, err)
				if err := a.purgeCheck(checkID); err != nil {
					return fmt.Errorf("Failed purging check %q: %s", checkID, err)
				}
			}
			a.logger.Printf("[DEBUG] agent: restored health check %q from %q",
				p.Check.CheckID, file)
		}
	}

	return nil
}
|
2015-01-08 02:05:46 +00:00
|
|
|
|
|
|
|
// unloadChecks will deregister all checks known to the local agent.
|
|
|
|
func (a *Agent) unloadChecks() error {
|
2017-08-28 12:17:13 +00:00
|
|
|
for id := range a.State.Checks() {
|
2019-03-04 14:34:05 +00:00
|
|
|
if err := a.removeCheckLocked(id, false); err != nil {
|
2017-08-28 12:17:11 +00:00
|
|
|
return fmt.Errorf("Failed deregistering check '%s': %s", id, err)
|
2015-01-08 02:05:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2015-01-15 08:16:34 +00:00
|
|
|
|
2018-07-17 20:16:43 +00:00
|
|
|
// loadPersistedProxies will load connect proxy definitions from their
|
|
|
|
// persisted state on disk and return a slice of them
|
|
|
|
//
|
|
|
|
// This does not add them to the local
|
|
|
|
func (a *Agent) loadPersistedProxies() (map[string]persistedProxy, error) {
|
|
|
|
persistedProxies := make(map[string]persistedProxy)
|
2018-04-16 15:00:20 +00:00
|
|
|
|
2018-05-14 20:55:24 +00:00
|
|
|
proxyDir := filepath.Join(a.config.DataDir, proxyDir)
|
|
|
|
files, err := ioutil.ReadDir(proxyDir)
|
|
|
|
if err != nil {
|
2018-07-17 20:16:43 +00:00
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
return nil, fmt.Errorf("Failed reading proxies dir %q: %s", proxyDir, err)
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
|
|
|
}
|
2018-07-17 20:16:43 +00:00
|
|
|
|
2018-05-14 20:55:24 +00:00
|
|
|
for _, fi := range files {
|
|
|
|
// Skip all dirs
|
|
|
|
if fi.IsDir() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Skip all partially written temporary files
|
|
|
|
if strings.HasSuffix(fi.Name(), "tmp") {
|
2018-07-17 20:16:43 +00:00
|
|
|
return nil, fmt.Errorf("Ignoring temporary proxy file %v", fi.Name())
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Open the file for reading
|
|
|
|
file := filepath.Join(proxyDir, fi.Name())
|
|
|
|
fh, err := os.Open(file)
|
|
|
|
if err != nil {
|
2018-07-17 20:16:43 +00:00
|
|
|
return nil, fmt.Errorf("failed opening proxy file %q: %s", file, err)
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Read the contents into a buffer
|
|
|
|
buf, err := ioutil.ReadAll(fh)
|
|
|
|
fh.Close()
|
|
|
|
if err != nil {
|
2018-07-17 20:16:43 +00:00
|
|
|
return nil, fmt.Errorf("failed reading proxy file %q: %s", file, err)
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Try decoding the proxy definition
|
|
|
|
var p persistedProxy
|
|
|
|
if err := json.Unmarshal(buf, &p); err != nil {
|
2018-07-17 20:16:43 +00:00
|
|
|
return nil, fmt.Errorf("Failed decoding proxy file %q: %s", file, err)
|
2018-05-14 20:55:24 +00:00
|
|
|
}
|
2018-07-17 20:16:43 +00:00
|
|
|
svcID := p.Proxy.TargetServiceID
|
|
|
|
|
|
|
|
persistedProxies[svcID] = p
|
|
|
|
}
|
|
|
|
|
|
|
|
return persistedProxies, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// loadProxies will load connect proxy definitions from configuration and
// persisted definitions on disk, and load them into the local agent.
// Config-sourced proxies are registered first (reusing any persisted proxy
// token); persisted proxies are then either restored, purged as stale, or
// reported as overwritten by config. Any persistence read error is deferred
// and returned at the end so config-sourced proxies still load.
//
// NOTE(review): uses addProxyLocked — presumably the caller holds
// a.stateLock; confirm at call sites.
func (a *Agent) loadProxies(conf *config.RuntimeConfig) error {
	persistedProxies, persistenceErr := a.loadPersistedProxies()

	for _, svc := range conf.Services {
		if svc.Connect != nil {
			proxy, err := svc.ConnectManagedProxy()
			if err != nil {
				return fmt.Errorf("failed adding proxy: %s", err)
			}
			if proxy == nil {
				continue
			}
			// Carry over the proxy token from a previous persistence of the
			// same target service, if any.
			restoredToken := ""
			if persisted, ok := persistedProxies[proxy.TargetServiceID]; ok {
				restoredToken = persisted.ProxyToken
			}

			if err := a.addProxyLocked(proxy, true, true, restoredToken, ConfigSourceLocal); err != nil {
				return fmt.Errorf("failed adding proxy: %s", err)
			}
		}
	}

	for _, persisted := range persistedProxies {
		proxyID := persisted.Proxy.ProxyService.ID
		if persisted.FromFile && a.State.Proxy(proxyID) == nil {
			// Purge proxies that were configured previously but are no longer in the config
			a.logger.Printf("[DEBUG] agent: purging stale persisted proxy %q", proxyID)
			if err := a.purgeProxy(proxyID); err != nil {
				return fmt.Errorf("failed purging proxy %q: %v", proxyID, err)
			}
		} else if !persisted.FromFile {
			if a.State.Proxy(proxyID) == nil {
				// API-registered proxy not present in config: restore it.
				a.logger.Printf("[DEBUG] agent: restored proxy definition %q", proxyID)
				if err := a.addProxyLocked(persisted.Proxy, false, false, persisted.ProxyToken, ConfigSourceLocal); err != nil {
					return fmt.Errorf("failed adding proxy %q: %v", proxyID, err)
				}
			} else {
				a.logger.Printf("[WARN] agent: proxy definition %q was overwritten by a proxy definition within a config file", proxyID)
			}
		}
	}

	// Surface any deferred error from reading the persisted proxies.
	return persistenceErr
}
|
|
|
|
|
2019-02-27 19:28:31 +00:00
|
|
|
// persistedTokens is the on-disk JSON representation of ACL tokens that were
// set via the API and persisted (read back by getPersistedTokens and applied
// in loadTokens). Empty fields are omitted from the file.
type persistedTokens struct {
	// Replication is applied via tokens.UpdateReplicationToken in loadTokens.
	Replication string `json:"replication,omitempty"`
	// AgentMaster is applied via tokens.UpdateAgentMasterToken in loadTokens.
	AgentMaster string `json:"agent_master,omitempty"`
	// Default is applied via tokens.UpdateUserToken in loadTokens.
	Default string `json:"default,omitempty"`
	// Agent is applied via tokens.UpdateAgentToken in loadTokens.
	Agent string `json:"agent,omitempty"`
}
|
|
|
|
|
|
|
|
func (a *Agent) getPersistedTokens() (*persistedTokens, error) {
|
|
|
|
persistedTokens := &persistedTokens{}
|
|
|
|
if !a.config.ACLEnableTokenPersistence {
|
|
|
|
return persistedTokens, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
a.persistedTokensLock.RLock()
|
|
|
|
defer a.persistedTokensLock.RUnlock()
|
|
|
|
|
|
|
|
tokensFullPath := filepath.Join(a.config.DataDir, tokensPath)
|
|
|
|
|
|
|
|
buf, err := ioutil.ReadFile(tokensFullPath)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
// non-existence is not an error we care about
|
|
|
|
return persistedTokens, nil
|
|
|
|
}
|
|
|
|
return persistedTokens, fmt.Errorf("failed reading tokens file %q: %s", tokensFullPath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := json.Unmarshal(buf, persistedTokens); err != nil {
|
|
|
|
return persistedTokens, fmt.Errorf("failed to decode tokens file %q: %s", tokensFullPath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return persistedTokens, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a *Agent) loadTokens(conf *config.RuntimeConfig) error {
|
|
|
|
persistedTokens, persistenceErr := a.getPersistedTokens()
|
|
|
|
|
|
|
|
if persistenceErr != nil {
|
|
|
|
a.logger.Printf("[WARN] unable to load persisted tokens: %v", persistenceErr)
|
|
|
|
}
|
|
|
|
|
|
|
|
if persistedTokens.Default != "" {
|
|
|
|
a.tokens.UpdateUserToken(persistedTokens.Default, token.TokenSourceAPI)
|
|
|
|
|
|
|
|
if conf.ACLToken != "" {
|
|
|
|
a.logger.Printf("[WARN] \"default\" token present in both the configuration and persisted token store, using the persisted token")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
a.tokens.UpdateUserToken(conf.ACLToken, token.TokenSourceConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
if persistedTokens.Agent != "" {
|
|
|
|
a.tokens.UpdateAgentToken(persistedTokens.Agent, token.TokenSourceAPI)
|
|
|
|
|
|
|
|
if conf.ACLAgentToken != "" {
|
|
|
|
a.logger.Printf("[WARN] \"agent\" token present in both the configuration and persisted token store, using the persisted token")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
a.tokens.UpdateAgentToken(conf.ACLAgentToken, token.TokenSourceConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
if persistedTokens.AgentMaster != "" {
|
|
|
|
a.tokens.UpdateAgentMasterToken(persistedTokens.AgentMaster, token.TokenSourceAPI)
|
|
|
|
|
|
|
|
if conf.ACLAgentMasterToken != "" {
|
|
|
|
a.logger.Printf("[WARN] \"agent_master\" token present in both the configuration and persisted token store, using the persisted token")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
a.tokens.UpdateAgentMasterToken(conf.ACLAgentMasterToken, token.TokenSourceConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
if persistedTokens.Replication != "" {
|
|
|
|
a.tokens.UpdateReplicationToken(persistedTokens.Replication, token.TokenSourceAPI)
|
|
|
|
|
|
|
|
if conf.ACLReplicationToken != "" {
|
|
|
|
a.logger.Printf("[WARN] \"replication\" token present in both the configuration and persisted token store, using the persisted token")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
a.tokens.UpdateReplicationToken(conf.ACLReplicationToken, token.TokenSourceConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
return persistenceErr
|
|
|
|
}
|
|
|
|
|
2018-04-16 15:00:20 +00:00
|
|
|
// unloadProxies will deregister all proxies known to the local agent.
|
|
|
|
func (a *Agent) unloadProxies() error {
|
2018-04-18 20:48:58 +00:00
|
|
|
for id := range a.State.Proxies() {
|
2018-07-17 20:16:43 +00:00
|
|
|
if err := a.removeProxyLocked(id, false); err != nil {
|
2018-04-18 20:48:58 +00:00
|
|
|
return fmt.Errorf("Failed deregistering proxy '%s': %s", id, err)
|
|
|
|
}
|
|
|
|
}
|
2018-04-16 15:00:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-02-17 20:00:04 +00:00
|
|
|
// snapshotCheckState is used to snapshot the current state of the health
|
|
|
|
// checks. This is done before we reload our checks, so that we can properly
|
|
|
|
// restore into the same state.
|
2016-06-06 20:19:31 +00:00
|
|
|
func (a *Agent) snapshotCheckState() map[types.CheckID]*structs.HealthCheck {
|
2017-08-28 12:17:13 +00:00
|
|
|
return a.State.Checks()
|
2015-02-17 20:00:04 +00:00
|
|
|
}
|
|
|
|
|
2017-01-11 19:41:12 +00:00
|
|
|
// loadMetadata loads node metadata fields from the agent config and
|
2017-01-05 22:10:26 +00:00
|
|
|
// updates them on the local agent.
|
2017-09-25 18:40:42 +00:00
|
|
|
func (a *Agent) loadMetadata(conf *config.RuntimeConfig) error {
|
2017-08-28 12:17:12 +00:00
|
|
|
meta := map[string]string{}
|
|
|
|
for k, v := range conf.NodeMeta {
|
|
|
|
meta[k] = v
|
2017-01-11 19:41:12 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
meta[structs.MetaSegmentKey] = conf.SegmentName
|
2017-08-28 12:17:13 +00:00
|
|
|
return a.State.LoadMetadata(meta)
|
2017-01-11 19:41:12 +00:00
|
|
|
}
|
|
|
|
|
2017-01-05 22:10:26 +00:00
|
|
|
// unloadMetadata resets the local metadata state
|
2017-01-11 19:41:12 +00:00
|
|
|
func (a *Agent) unloadMetadata() {
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.UnloadMetadata()
|
2017-01-05 22:10:26 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 20:20:57 +00:00
|
|
|
// serviceMaintCheckID returns the ID of a given service's maintenance check
|
2016-06-06 20:19:31 +00:00
|
|
|
func serviceMaintCheckID(serviceID string) types.CheckID {
|
2016-11-29 21:15:20 +00:00
|
|
|
return types.CheckID(structs.ServiceMaintPrefix + serviceID)
|
2015-01-15 20:20:57 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 08:25:36 +00:00
|
|
|
// EnableServiceMaintenance will register a false health check against the given
|
|
|
|
// service ID with critical status. This will exclude the service from queries.
|
2015-09-10 18:43:59 +00:00
|
|
|
func (a *Agent) EnableServiceMaintenance(serviceID, reason, token string) error {
|
2017-08-28 12:17:13 +00:00
|
|
|
service, ok := a.State.Services()[serviceID]
|
2015-01-15 18:51:00 +00:00
|
|
|
if !ok {
|
2015-01-15 08:16:34 +00:00
|
|
|
return fmt.Errorf("No service registered with ID %q", serviceID)
|
|
|
|
}
|
|
|
|
|
2015-01-15 20:20:57 +00:00
|
|
|
// Check if maintenance mode is not already enabled
|
|
|
|
checkID := serviceMaintCheckID(serviceID)
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[checkID]; ok {
|
2015-01-15 18:51:00 +00:00
|
|
|
return nil
|
2015-01-15 08:16:34 +00:00
|
|
|
}
|
|
|
|
|
2015-01-21 20:21:57 +00:00
|
|
|
// Use default notes if no reason provided
|
|
|
|
if reason == "" {
|
2015-01-21 22:45:09 +00:00
|
|
|
reason = defaultServiceMaintReason
|
2015-01-21 20:21:57 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 08:16:34 +00:00
|
|
|
// Create and register the critical health check
|
|
|
|
check := &structs.HealthCheck{
|
|
|
|
Node: a.config.NodeName,
|
2015-01-15 20:20:57 +00:00
|
|
|
CheckID: checkID,
|
2015-01-15 08:16:34 +00:00
|
|
|
Name: "Service Maintenance Mode",
|
2015-01-21 20:21:57 +00:00
|
|
|
Notes: reason,
|
2015-01-15 08:16:34 +00:00
|
|
|
ServiceID: service.ID,
|
|
|
|
ServiceName: service.Service,
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2015-01-15 08:16:34 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
a.AddCheck(check, nil, true, token, ConfigSourceLocal)
|
2015-01-22 19:14:28 +00:00
|
|
|
a.logger.Printf("[INFO] agent: Service %q entered maintenance mode", serviceID)
|
2015-01-15 08:16:34 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-01-15 08:25:36 +00:00
|
|
|
// DisableServiceMaintenance will deregister the fake maintenance mode check
|
|
|
|
// if the service has been marked as in maintenance.
|
2015-01-15 08:16:34 +00:00
|
|
|
func (a *Agent) DisableServiceMaintenance(serviceID string) error {
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Services()[serviceID]; !ok {
|
2015-01-15 08:16:34 +00:00
|
|
|
return fmt.Errorf("No service registered with ID %q", serviceID)
|
|
|
|
}
|
|
|
|
|
2015-01-15 20:20:57 +00:00
|
|
|
// Check if maintenance mode is enabled
|
|
|
|
checkID := serviceMaintCheckID(serviceID)
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[checkID]; !ok {
|
2015-01-15 20:20:57 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-01-15 08:16:34 +00:00
|
|
|
// Deregister the maintenance check
|
2015-01-15 20:20:57 +00:00
|
|
|
a.RemoveCheck(checkID, true)
|
2015-01-22 19:14:28 +00:00
|
|
|
a.logger.Printf("[INFO] agent: Service %q left maintenance mode", serviceID)
|
2015-01-15 20:20:57 +00:00
|
|
|
|
2015-01-15 08:16:34 +00:00
|
|
|
return nil
|
|
|
|
}
|
2015-01-15 19:20:22 +00:00
|
|
|
|
|
|
|
// EnableNodeMaintenance places a node into maintenance mode.
|
2015-09-10 18:43:59 +00:00
|
|
|
func (a *Agent) EnableNodeMaintenance(reason, token string) {
|
2015-01-15 19:20:22 +00:00
|
|
|
// Ensure node maintenance is not already enabled
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[structs.NodeMaint]; ok {
|
2015-01-15 19:20:22 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-01-21 20:21:57 +00:00
|
|
|
// Use a default notes value
|
|
|
|
if reason == "" {
|
2015-01-21 22:45:09 +00:00
|
|
|
reason = defaultNodeMaintReason
|
2015-01-21 20:21:57 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 19:20:22 +00:00
|
|
|
// Create and register the node maintenance check
|
|
|
|
check := &structs.HealthCheck{
|
|
|
|
Node: a.config.NodeName,
|
2016-11-29 21:15:20 +00:00
|
|
|
CheckID: structs.NodeMaint,
|
2015-01-15 19:20:22 +00:00
|
|
|
Name: "Node Maintenance Mode",
|
2015-01-21 20:21:57 +00:00
|
|
|
Notes: reason,
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2015-01-15 19:20:22 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
a.AddCheck(check, nil, true, token, ConfigSourceLocal)
|
2015-01-22 19:14:28 +00:00
|
|
|
a.logger.Printf("[INFO] agent: Node entered maintenance mode")
|
2015-01-15 19:20:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DisableNodeMaintenance removes a node from maintenance mode
|
|
|
|
func (a *Agent) DisableNodeMaintenance() {
|
2017-08-28 12:17:13 +00:00
|
|
|
if _, ok := a.State.Checks()[structs.NodeMaint]; !ok {
|
2015-01-15 20:20:57 +00:00
|
|
|
return
|
|
|
|
}
|
2016-11-29 21:15:20 +00:00
|
|
|
a.RemoveCheck(structs.NodeMaint, true)
|
2015-01-22 19:14:28 +00:00
|
|
|
a.logger.Printf("[INFO] agent: Node left maintenance mode")
|
2015-01-15 19:20:22 +00:00
|
|
|
}
|
2015-11-12 17:19:33 +00:00
|
|
|
|
2018-04-08 21:28:29 +00:00
|
|
|
func (a *Agent) loadLimits(conf *config.RuntimeConfig) {
|
2018-06-11 19:51:17 +00:00
|
|
|
a.config.RPCRateLimit = conf.RPCRateLimit
|
|
|
|
a.config.RPCMaxBurst = conf.RPCMaxBurst
|
2018-04-08 21:28:29 +00:00
|
|
|
}
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
// ReloadConfig applies a new runtime configuration to a running agent.
// Anti-entropy sync is paused and the state lock held for the duration so
// the unload/reload of services, checks, proxies and metadata appears
// atomic. Check Output/Status are snapshotted first and restored when the
// checks are recreated. Ordering below is significant.
func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error {
	// Bulk update the services and checks
	a.PauseSync()
	defer a.ResumeSync()

	a.stateLock.Lock()
	defer a.stateLock.Unlock()

	// Snapshot the current state, and use that to initialize the checks when
	// they are recreated.
	snap := a.snapshotCheckState()

	// First unload all checks, services, and metadata. This lets us begin the reload
	// with a clean slate.
	if err := a.unloadProxies(); err != nil {
		return fmt.Errorf("Failed unloading proxies: %s", err)
	}
	if err := a.unloadServices(); err != nil {
		return fmt.Errorf("Failed unloading services: %s", err)
	}
	if err := a.unloadChecks(); err != nil {
		return fmt.Errorf("Failed unloading checks: %s", err)
	}
	a.unloadMetadata()

	// Reload tokens - should be done before all the other loading
	// to ensure the correct tokens are available for attaching to
	// the checks and service registrations.
	// NOTE(review): the error return of loadTokens is dropped here; loadTokens
	// itself logs the persistence failure — confirm that is intended.
	a.loadTokens(newCfg)

	if err := a.tlsConfigurator.Update(newCfg.ToTLSUtilConfig()); err != nil {
		return fmt.Errorf("Failed reloading tls configuration: %s", err)
	}

	// Reload service/check definitions and metadata.
	if err := a.loadServices(newCfg); err != nil {
		return fmt.Errorf("Failed reloading services: %s", err)
	}
	if err := a.loadProxies(newCfg); err != nil {
		return fmt.Errorf("Failed reloading proxies: %s", err)
	}
	if err := a.loadChecks(newCfg, snap); err != nil {
		return fmt.Errorf("Failed reloading checks: %s", err)
	}
	if err := a.loadMetadata(newCfg); err != nil {
		return fmt.Errorf("Failed reloading metadata: %s", err)
	}

	if err := a.reloadWatches(newCfg); err != nil {
		return fmt.Errorf("Failed reloading watches: %v", err)
	}

	a.loadLimits(newCfg)

	// Propagate the new config to every DNS server.
	for _, s := range a.dnsServers {
		if err := s.ReloadConfig(newCfg); err != nil {
			return fmt.Errorf("Failed reloading dns config : %v", err)
		}
	}

	// this only gets used by the consulConfig function and since
	// that is only ever done during init and reload here then
	// an in place modification is safe as reloads cannot be
	// concurrent due to both gaining a full lock on the stateLock
	a.config.ConfigEntryBootstrap = newCfg.ConfigEntryBootstrap

	// create the config for the rpc server/client
	consulCfg, err := a.consulConfig()
	if err != nil {
		return err
	}

	if err := a.delegate.ReloadConfig(consulCfg); err != nil {
		return err
	}

	// Update filtered metrics
	metrics.UpdateFilter(newCfg.Telemetry.AllowedPrefixes,
		newCfg.Telemetry.BlockedPrefixes)

	a.State.SetDiscardCheckOutput(newCfg.DiscardCheckOutput)

	return nil
}
|
2018-04-11 08:52:51 +00:00
|
|
|
|
|
|
|
// registerCache configures the cache and registers all the supported
|
|
|
|
// types onto the cache. This is NOT safe to call multiple times so
|
|
|
|
// care should be taken to call this exactly once after the cache
|
|
|
|
// field has been initialized.
|
|
|
|
func (a *Agent) registerCache() {
|
2018-09-06 10:34:28 +00:00
|
|
|
// Note that you should register the _agent_ as the RPC implementation and not
|
|
|
|
// the a.delegate directly, otherwise tests that rely on overriding RPC
|
|
|
|
// routing via a.registerEndpoint will not work.
|
|
|
|
|
2018-04-11 08:52:51 +00:00
|
|
|
a.cache.RegisterType(cachetype.ConnectCARootName, &cachetype.ConnectCARoot{
|
2018-09-06 10:34:28 +00:00
|
|
|
RPC: a,
|
2018-04-11 08:52:51 +00:00
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
2018-05-09 18:04:52 +00:00
|
|
|
RefreshTimer: 0 * time.Second,
|
2018-04-11 08:52:51 +00:00
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2018-04-17 23:26:58 +00:00
|
|
|
|
2018-04-30 21:23:49 +00:00
|
|
|
a.cache.RegisterType(cachetype.ConnectCALeafName, &cachetype.ConnectCALeaf{
|
2019-01-10 12:46:11 +00:00
|
|
|
RPC: a,
|
|
|
|
Cache: a.cache,
|
|
|
|
Datacenter: a.config.Datacenter,
|
|
|
|
TestOverrideCAChangeInitialDelay: a.config.ConnectTestCALeafRootChangeSpread,
|
2018-04-30 21:23:49 +00:00
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
2018-05-09 18:04:52 +00:00
|
|
|
RefreshTimer: 0 * time.Second,
|
2018-04-30 21:23:49 +00:00
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
|
|
|
|
2018-04-17 23:26:58 +00:00
|
|
|
a.cache.RegisterType(cachetype.IntentionMatchName, &cachetype.IntentionMatch{
|
2018-09-06 10:34:28 +00:00
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.CatalogServicesName, &cachetype.CatalogServices{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.HealthServicesName, &cachetype.HealthServices{
|
|
|
|
RPC: a,
|
2018-04-17 23:26:58 +00:00
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
2018-05-09 18:04:52 +00:00
|
|
|
RefreshTimer: 0 * time.Second,
|
2018-04-17 23:26:58 +00:00
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2018-09-06 10:34:28 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.PreparedQueryName, &cachetype.PreparedQuery{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Prepared queries don't support blocking
|
|
|
|
Refresh: false,
|
|
|
|
})
|
2019-02-25 19:06:01 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.NodeServicesName, &cachetype.NodeServices{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2019-04-23 06:39:02 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.ResolvedServiceConfigName, &cachetype.ResolvedServiceConfig{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2019-06-24 18:11:34 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.CatalogListServicesName, &cachetype.CatalogListServices{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.CatalogDatacentersName, &cachetype.CatalogDatacenters{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
Refresh: false,
|
|
|
|
})
|
2019-06-20 19:04:39 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.InternalServiceDumpName, &cachetype.InternalServiceDump{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2019-07-02 03:10:51 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.CompiledDiscoveryChainName, &cachetype.CompiledDiscoveryChain{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2019-07-02 00:45:42 +00:00
|
|
|
|
|
|
|
a.cache.RegisterType(cachetype.ConfigEntriesName, &cachetype.ConfigEntries{
|
|
|
|
RPC: a,
|
|
|
|
}, &cache.RegisterOptions{
|
|
|
|
// Maintain a blocking query, retry dropped connections quickly
|
|
|
|
Refresh: true,
|
|
|
|
RefreshTimer: 0 * time.Second,
|
|
|
|
RefreshTimeout: 10 * time.Minute,
|
|
|
|
})
|
2018-04-11 08:52:51 +00:00
|
|
|
}
|
2018-05-03 17:44:10 +00:00
|
|
|
|
|
|
|
// defaultProxyCommand returns the default Connect managed proxy command.
|
2018-06-15 20:04:04 +00:00
|
|
|
func defaultProxyCommand(agentCfg *config.RuntimeConfig) ([]string, error) {
|
2019-03-06 17:13:28 +00:00
|
|
|
// Get the path to the current executable. This is cached once by the
|
2018-05-03 17:44:10 +00:00
|
|
|
// library so this is effectively just a variable read.
|
|
|
|
execPath, err := os.Executable()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// "consul connect proxy" default value for managed daemon proxy
|
2018-06-15 20:04:04 +00:00
|
|
|
cmd := []string{execPath, "connect", "proxy"}
|
|
|
|
|
|
|
|
if agentCfg != nil && agentCfg.LogLevel != "INFO" {
|
|
|
|
cmd = append(cmd, "-log-level", agentCfg.LogLevel)
|
|
|
|
}
|
|
|
|
return cmd, nil
|
2018-05-03 17:44:10 +00:00
|
|
|
}
|