2013-12-20 01:14:46 +00:00
package agent
2013-12-20 23:33:13 +00:00
import (
2017-05-19 09:53:41 +00:00
"context"
2017-04-10 18:57:24 +00:00
"crypto/sha512"
2017-05-24 13:22:56 +00:00
"crypto/tls"
2014-11-24 08:36:03 +00:00
"encoding/json"
2013-12-20 23:33:13 +00:00
"fmt"
2013-12-21 00:39:32 +00:00
"io"
2015-06-04 21:33:30 +00:00
"io/ioutil"
2014-01-01 00:45:13 +00:00
"net"
2017-05-19 09:53:41 +00:00
"net/http"
2013-12-21 00:39:32 +00:00
"os"
2014-09-06 00:22:33 +00:00
"path/filepath"
2019-09-26 02:55:52 +00:00
"regexp"
2014-02-24 00:42:39 +00:00
"strconv"
2016-12-02 05:35:38 +00:00
"strings"
2013-12-21 00:39:32 +00:00
"sync"
2015-06-05 23:17:07 +00:00
"time"
2014-06-16 21:36:12 +00:00
2020-01-31 16:19:37 +00:00
"github.com/hashicorp/go-connlimit"
2020-01-28 23:50:41 +00:00
"github.com/hashicorp/go-hclog"
2019-10-04 21:10:02 +00:00
"github.com/hashicorp/go-memdb"
2020-06-10 20:47:35 +00:00
"github.com/mitchellh/cli"
2019-10-04 21:10:02 +00:00
2018-10-03 19:37:53 +00:00
"google.golang.org/grpc"
2020-06-10 20:47:35 +00:00
"google.golang.org/grpc/grpclog"
2018-10-03 19:37:53 +00:00
2019-09-26 02:55:52 +00:00
"github.com/armon/go-metrics"
2017-08-23 14:52:48 +00:00
"github.com/hashicorp/consul/acl"
2017-08-28 12:17:09 +00:00
"github.com/hashicorp/consul/agent/ae"
2020-06-10 20:47:35 +00:00
autoconf "github.com/hashicorp/consul/agent/auto-config"
2018-04-11 08:52:51 +00:00
"github.com/hashicorp/consul/agent/cache"
2019-02-25 19:06:01 +00:00
cachetype "github.com/hashicorp/consul/agent/cache-types"
2017-10-25 09:18:07 +00:00
"github.com/hashicorp/consul/agent/checks"
2017-09-25 18:40:42 +00:00
"github.com/hashicorp/consul/agent/config"
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
"github.com/hashicorp/consul/agent/consul"
2017-08-28 12:17:12 +00:00
"github.com/hashicorp/consul/agent/local"
2020-06-10 20:15:32 +00:00
"github.com/hashicorp/consul/agent/pool"
2018-10-03 19:37:53 +00:00
"github.com/hashicorp/consul/agent/proxycfg"
2017-07-06 10:34:00 +00:00
"github.com/hashicorp/consul/agent/structs"
2017-06-21 04:43:55 +00:00
"github.com/hashicorp/consul/agent/systemd"
2017-07-26 18:03:43 +00:00
"github.com/hashicorp/consul/agent/token"
2018-10-03 19:37:53 +00:00
"github.com/hashicorp/consul/agent/xds"
2017-04-19 23:00:11 +00:00
"github.com/hashicorp/consul/api"
2019-04-26 16:33:01 +00:00
"github.com/hashicorp/consul/api/watch"
2017-05-15 20:10:36 +00:00
"github.com/hashicorp/consul/ipaddr"
2016-01-29 19:42:34 +00:00
"github.com/hashicorp/consul/lib"
2018-05-03 20:56:42 +00:00
"github.com/hashicorp/consul/lib/file"
2020-01-28 23:50:41 +00:00
"github.com/hashicorp/consul/logging"
2019-02-26 15:52:07 +00:00
"github.com/hashicorp/consul/tlsutil"
2016-06-06 20:19:31 +00:00
"github.com/hashicorp/consul/types"
2019-09-26 02:55:52 +00:00
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
2017-09-07 19:17:20 +00:00
"github.com/hashicorp/memberlist"
2017-02-24 04:32:13 +00:00
"github.com/hashicorp/raft"
2014-06-16 21:36:12 +00:00
"github.com/hashicorp/serf/serf"
2017-02-01 18:27:04 +00:00
"github.com/shirou/gopsutil/host"
2017-11-07 23:06:59 +00:00
"golang.org/x/net/http2"
2013-12-20 23:33:13 +00:00
)
2014-11-24 08:36:03 +00:00
const (
	// servicesDir / serviceConfigDir are the data-dir subpaths used to
	// persist agent service definitions and their resolved configs.
	servicesDir      = "services"
	serviceConfigDir = "services/configs"

	// proxyDir is the data-dir subpath used to persist agent proxy definitions.
	proxyDir = "proxies"

	// checksDir / checkStateDir are the data-dir subpaths used to persist
	// local agent checks and their state.
	checksDir     = "checks"
	checkStateDir = "checks/state"

	// tokensPath is the name of the file tokens will be persisted within.
	tokensPath = "acl-tokens.json"

	// Default reasons for node/service maintenance mode, used when the
	// caller does not supply a reason.
	defaultNodeMaintReason = "Maintenance mode is enabled for this node, " +
		"but no reason was provided. This is a default message."
	defaultServiceMaintReason = "Maintenance mode is enabled for this " +
		"service, but no reason was provided. This is a default message."

	// rootsWatchID is the ID of the roots watch.
	rootsWatchID = "roots"

	// leafWatchID is the ID of the leaf watch.
	leafWatchID = "leaf"

	// maxQueryTime is used to bound the limit of a blocking query
	maxQueryTime = 600 * time.Second

	// defaultQueryTime is the amount of time we block waiting for a change
	// if no time is specified. Previously we would wait the maxQueryTime.
	defaultQueryTime = 300 * time.Second
)

var (
	// httpAddrRE decomposes an http(s) address into scheme, host,
	// optional port, path and optional query parts.
	httpAddrRE = regexp.MustCompile(`^(http[s]?://)(\[.*?\]|\[?[\w\-\.]+)(:\d+)?([^?]*)(\?.*)?$`)
	// grpcAddrRE captures the text before a trailing ":port", the ":port"
	// itself, and anything after it.
	grpcAddrRE = regexp.MustCompile("(.*)((?::)(?:[0-9]+))(.*)$")
)
2018-10-11 12:22:11 +00:00
// configSource identifies where a service or check definition originated:
// either the local agent (config files / local API) or a remote source.
type configSource int

const (
	ConfigSourceLocal configSource = iota
	ConfigSourceRemote
)

// configSourceToName maps each configSource to its persisted string form.
var configSourceToName = map[configSource]string{
	ConfigSourceLocal:  "local",
	ConfigSourceRemote: "remote",
}

// configSourceFromName is the inverse mapping used when reading persisted
// definitions back in.
var configSourceFromName = map[string]configSource{
	"local":  ConfigSourceLocal,
	"remote": ConfigSourceRemote,
	// If the value is not found in the persisted config file, then use the
	// former default.
	"": ConfigSourceLocal,
}

// String returns the string form of a configSource, or the empty string
// for an unknown value.
func (s configSource) String() string {
	name, ok := configSourceToName[s]
	if !ok {
		return ""
	}
	return name
}

// ConfigSourceFromName will unmarshal the string form of a configSource.
// The second return value reports whether the name was recognized.
func ConfigSourceFromName(name string) (configSource, bool) {
	if src, ok := configSourceFromName[name]; ok {
		return src, true
	}
	return 0, false
}
2017-06-15 09:42:07 +00:00
// delegate defines the interface shared by both
// consul.Client and consul.Server, allowing the agent to operate
// uniformly in either client or server mode.
type delegate interface {
	// GetLANCoordinate returns this node's network coordinate set for the
	// LAN gossip pool.
	GetLANCoordinate() (lib.CoordinateSet, error)
	// Leave gracefully leaves the cluster.
	Leave() error
	// LANMembers returns the members of the LAN gossip pool.
	LANMembers() []serf.Member
	LANMembersAllSegments() ([]serf.Member, error)
	LANSegmentMembers(segment string) ([]serf.Member, error)
	// LocalMember returns this agent's own serf member information.
	LocalMember() serf.Member
	// JoinLAN joins the given addresses into the LAN pool, returning the
	// number of nodes successfully contacted.
	JoinLAN(addrs []string) (n int, err error)
	RemoveFailedNode(node string, prune bool) error

	// ResolveToken resolves an ACL secret ID to an authorizer.
	ResolveToken(secretID string) (acl.Authorizer, error)
	ResolveTokenToIdentity(secretID string) (structs.ACLIdentity, error)
	ResolveTokenAndDefaultMeta(secretID string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error)

	// RPC performs a generic Consul RPC against the delegate.
	RPC(method string, args interface{}, reply interface{}) error
	ACLsEnabled() bool
	UseLegacyACLs() bool
	SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
	// Shutdown stops the delegate and releases its resources.
	Shutdown() error
	// Stats returns delegate statistics grouped by subsystem.
	Stats() map[string]map[string]string
	// ReloadConfig applies a new consul configuration at runtime.
	ReloadConfig(config *consul.Config) error

	// enterpriseDelegate carries methods only used in consul-enterprise builds.
	enterpriseDelegate
}
2015-02-09 17:22:51 +00:00
2017-06-21 04:43:55 +00:00
// notifier is called after a successful JoinLAN.
// The agent's default implementation is systemd.Notifier (see New).
type notifier interface {
	// Notify delivers the given message; it returns an error if delivery fails.
	Notify(string) error
}
2020-01-27 19:54:32 +00:00
// Agent is the long running process that is run on every machine.
// It exposes an RPC interface that is used by the CLI to control the
// agent. The agent runs the query interfaces like HTTP, DNS, and RPC.
// However, it can run in either a client, or server mode. In server
// mode, it runs a full Consul server. In client-only mode, it only forwards
// requests to other Consul servers.
type Agent struct {
	// autoConf is created in New and used in Start to resolve the full
	// runtime configuration.
	autoConf *autoconf.AutoConfig

	// config is the agent configuration.
	config *config.RuntimeConfig

	// Used for writing our logs
	logger hclog.InterceptLogger

	// LogOutput is a Writer which is used when creating dependencies that
	// require logging. Note that this LogOutput is not used by the agent logger,
	// so setting this field does not result in the agent logs being written to
	// LogOutput.
	// FIXME: refactor so that: dependencies accept an hclog.Logger,
	// or LogOutput is part of RuntimeConfig, or change Agent.logger to be
	// a new type with an Out() io.Writer method which returns this value.
	LogOutput io.Writer

	// In-memory sink used for collecting metrics
	MemSink *metrics.InmemSink

	// delegate is either a *consul.Server or *consul.Client
	// depending on the configuration
	delegate delegate

	// aclMasterAuthorizer is an object that helps manage local ACL enforcement.
	aclMasterAuthorizer acl.Authorizer

	// State stores a local representation of the node,
	// services and checks. Used for anti-entropy.
	State *local.State

	// sync manages the synchronization of the local
	// and the remote state.
	sync *ae.StateSyncer

	// syncMu and syncCh are used to coordinate agent endpoints that are blocking
	// on local state during a config reload.
	syncMu sync.Mutex
	syncCh chan struct{}

	// cache is the in-memory cache for data the Agent requests.
	cache *cache.Cache

	// checkReapAfter maps the check ID to a timeout after which we should
	// reap its associated service
	checkReapAfter map[structs.CheckID]time.Duration

	// checkMonitors maps the check ID to an associated monitor
	checkMonitors map[structs.CheckID]*checks.CheckMonitor

	// checkHTTPs maps the check ID to an associated HTTP check
	checkHTTPs map[structs.CheckID]*checks.CheckHTTP

	// checkTCPs maps the check ID to an associated TCP check
	checkTCPs map[structs.CheckID]*checks.CheckTCP

	// checkGRPCs maps the check ID to an associated GRPC check
	checkGRPCs map[structs.CheckID]*checks.CheckGRPC

	// checkTTLs maps the check ID to an associated check TTL
	checkTTLs map[structs.CheckID]*checks.CheckTTL

	// checkDockers maps the check ID to an associated Docker Exec based check
	checkDockers map[structs.CheckID]*checks.CheckDocker

	// checkAliases maps the check ID to an associated Alias checks
	checkAliases map[structs.CheckID]*checks.CheckAlias

	// exposedPorts tracks listener ports for checks exposed through a proxy
	exposedPorts map[string]int

	// stateLock protects the agent state
	stateLock sync.Mutex

	// dockerClient is the client for performing docker health checks.
	dockerClient *checks.DockerClient

	// eventCh is used to receive user events
	eventCh chan serf.UserEvent

	// eventBuf stores the most recent events in a ring buffer
	// using eventIndex as the next index to insert into. This
	// is guarded by eventLock. When an insert happens, the
	// eventNotify group is notified.
	eventBuf    []*UserEvent
	eventIndex  int
	eventLock   sync.RWMutex
	eventNotify NotifyGroup

	// shutdown and shutdownCh record/signal agent shutdown; guarded by
	// shutdownLock.
	shutdown     bool
	shutdownCh   chan struct{}
	shutdownLock sync.Mutex

	// joinLANNotifier is called after a successful JoinLAN.
	joinLANNotifier notifier

	// retryJoinCh transports errors from the retry join
	// attempts.
	retryJoinCh chan error

	// endpoints maps unique RPC endpoint names to common ones
	// to allow overriding of RPC handlers since the golang
	// net/rpc server does not allow this.
	endpoints     map[string]string
	endpointsLock sync.RWMutex

	// dnsServers provides the DNS API
	dnsServers []*DNSServer

	// httpServers provides the HTTP API on various endpoints
	httpServers []*HTTPServer

	// wgServers is the wait group for all HTTP and DNS servers
	wgServers sync.WaitGroup

	// watchPlans tracks all the currently-running watch plans for the
	// agent.
	watchPlans []*watch.Plan

	// tokens holds ACL tokens initially from the configuration, but can
	// be updated at runtime, so should always be used instead of going to
	// the configuration directly.
	tokens *token.Store

	// proxyConfig is the manager for proxy service (Kind = connect-proxy)
	// configuration state. This ensures all state needed by a proxy registration
	// is maintained in cache and handles pushing updates to that state into XDS
	// server to be pushed out to Envoy.
	proxyConfig *proxycfg.Manager

	// serviceManager is the manager for combining local service registrations with
	// the centrally configured proxy/service defaults.
	serviceManager *ServiceManager

	// grpcServer is the server instance used currently to serve xDS API for
	// Envoy.
	grpcServer *grpc.Server

	// tlsConfigurator is the central instance to provide a *tls.Config
	// based on the current consul configuration.
	tlsConfigurator *tlsutil.Configurator

	// persistedTokensLock is used to synchronize access to the persisted token
	// store within the data directory. This will prevent loading while writing as
	// well as multiple concurrent writes.
	persistedTokensLock sync.RWMutex

	// httpConnLimiter is used to limit connections to the HTTP server by client
	// IP.
	httpConnLimiter connlimit.Limiter

	// connPool is the shared RPC connection pool (see initializeConnectionPool).
	connPool *pool.ConnPool

	// enterpriseAgent embeds fields that we only access in consul-enterprise builds
	enterpriseAgent
}
2020-06-10 20:47:35 +00:00
// agentOptions accumulates the settings supplied via AgentOption functions
// before New consumes them.
type agentOptions struct {
	// logger, if non-nil, overrides automatic logger creation (see WithLogger).
	logger hclog.InterceptLogger
	// builderOpts are the command-line config options (see WithBuilderOpts).
	builderOpts config.BuilderOpts
	// ui receives configuration warnings when no logger is set (see WithCLI).
	ui cli.Ui
	// config is a pre-parsed runtime configuration (see WithConfig; deprecated).
	config *config.RuntimeConfig
	// overrides are extra config sources appended during config building
	// (see WithOverrides).
	overrides []config.Source
	// writers are additional log outputs wired into the logger (see WithLogWriter).
	writers []io.Writer
	// initTelemetry controls whether New sets up metrics (see WithTelemetry).
	initTelemetry bool
}

// AgentOption is a functional option that mutates the agentOptions passed
// to New.
type AgentOption func(opt *agentOptions)
2020-06-24 14:15:25 +00:00
// WithTelemetry is used to control whether the agent will
// set up metrics.
func WithTelemetry(initTelemetry bool) AgentOption {
	apply := func(opt *agentOptions) {
		opt.initTelemetry = initTelemetry
	}
	return apply
}
2020-06-10 20:47:35 +00:00
// WithLogger is used to override any automatic logger creation
// and provide one already built instead. This is mostly useful
// for testing.
func WithLogger ( logger hclog . InterceptLogger ) AgentOption {
return func ( opt * agentOptions ) {
opt . logger = logger
2013-12-24 00:20:51 +00:00
}
2020-06-10 20:47:35 +00:00
}
// WithBuilderOpts specifies the command line config.BuilderOpts to use that the agent
// is being started with
func WithBuilderOpts ( builderOpts config . BuilderOpts ) AgentOption {
return func ( opt * agentOptions ) {
opt . builderOpts = builderOpts
2013-12-24 00:20:51 +00:00
}
2020-06-10 20:47:35 +00:00
}
2013-12-24 00:20:51 +00:00
2020-06-10 20:47:35 +00:00
// WithCLI provides a cli.Ui instance to use when emitting configuration
// warnings during the first configuration parsing.
func WithCLI(ui cli.Ui) AgentOption {
	apply := func(opt *agentOptions) {
		opt.ui = ui
	}
	return apply
}
// WithLogWriter will add an additional log output to the logger that gets
// configured after configuration parsing
func WithLogWriter ( writer io . Writer ) AgentOption {
return func ( opt * agentOptions ) {
opt . writers = append ( opt . writers , writer )
2020-06-10 20:15:32 +00:00
}
2020-06-10 20:47:35 +00:00
}
// WithOverrides is used to provide a config source to append to the tail sources
// during config building. It is really only useful for testing to tune non-user
// configurable tunables to make various tests converge more quickly than they
// could otherwise.
func WithOverrides(overrides ...config.Source) AgentOption {
	apply := func(opt *agentOptions) {
		opt.overrides = overrides
	}
	return apply
}
// WithConfig provides an already parsed configuration to the Agent.
//
// Deprecated: Should allow the agent to parse the configuration.
func WithConfig(config *config.RuntimeConfig) AgentOption {
	apply := func(opt *agentOptions) {
		opt.config = config
	}
	return apply
}
2020-06-10 20:15:32 +00:00
2020-06-10 20:47:35 +00:00
// flattenAgentOptions applies every AgentOption in order to a zero-valued
// agentOptions and returns the result.
func flattenAgentOptions(options []AgentOption) agentOptions {
	flat := agentOptions{}
	for i := range options {
		options[i](&flat)
	}
	return flat
}
// New process the desired options and creates a new Agent.
// This process will
//   - parse the config given the config Flags
//   - setup logging
//   - using predefined logger given in an option
//     OR
//   - initialize a new logger from the configuration
//     including setting up gRPC logging
//   - initialize telemetry
//   - create a TLS Configurator
//   - build a shared connection pool
//   - create the ServiceManager
//   - setup the NodeID if one isn't provided in the configuration
//   - create the AutoConfig object for future use in fully
//     resolving the configuration
func New(options ...AgentOption) (*Agent, error) {
	flat := flattenAgentOptions(options)

	// Create most of the agent
	a := Agent{
		checkReapAfter:  make(map[structs.CheckID]time.Duration),
		checkMonitors:   make(map[structs.CheckID]*checks.CheckMonitor),
		checkTTLs:       make(map[structs.CheckID]*checks.CheckTTL),
		checkHTTPs:      make(map[structs.CheckID]*checks.CheckHTTP),
		checkTCPs:       make(map[structs.CheckID]*checks.CheckTCP),
		checkGRPCs:      make(map[structs.CheckID]*checks.CheckGRPC),
		checkDockers:    make(map[structs.CheckID]*checks.CheckDocker),
		checkAliases:    make(map[structs.CheckID]*checks.CheckAlias),
		eventCh:         make(chan serf.UserEvent, 1024),
		eventBuf:        make([]*UserEvent, 256),
		joinLANNotifier: &systemd.Notifier{},
		retryJoinCh:     make(chan error),
		shutdownCh:      make(chan struct{}),
		endpoints:       make(map[string]string),
		tokens:          new(token.Store),
		logger:          flat.logger,
	}

	// parse the configuration and handle the error/warnings
	// NOTE: the local variable shadows the imported config package from
	// here on; later references to `config` are to this variable.
	config, warnings, err := autoconf.LoadConfig(flat.builderOpts, config.Source{}, flat.overrides...)
	if err != nil {
		return nil, err
	}
	// Emit warnings to the best available destination: the provided
	// logger, else the CLI UI, else stderr.
	for _, w := range warnings {
		if a.logger != nil {
			a.logger.Warn(w)
		} else if flat.ui != nil {
			flat.ui.Warn(w)
		} else {
			fmt.Fprint(os.Stderr, w)
		}
	}

	// set the config in the agent, this is just the preliminary configuration as we haven't
	// loaded any auto-config sources yet.
	a.config = config

	// Build a logger from the configuration only when one was not supplied
	// via WithLogger.
	if flat.logger == nil {
		logConf := &logging.Config{
			LogLevel:          config.LogLevel,
			LogJSON:           config.LogJSON,
			Name:              logging.Agent,
			EnableSyslog:      config.EnableSyslog,
			SyslogFacility:    config.SyslogFacility,
			LogFilePath:       config.LogFile,
			LogRotateDuration: config.LogRotateDuration,
			LogRotateBytes:    config.LogRotateBytes,
			LogRotateMaxFiles: config.LogRotateMaxFiles,
		}
		logger, logOutput, err := logging.Setup(logConf, flat.writers)
		if err != nil {
			return nil, err
		}
		a.logger = logger
		a.LogOutput = logOutput
		// Route gRPC's internal logging through the agent logger.
		grpclog.SetLoggerV2(logging.NewGRPCLogger(logConf, a.logger))
	}

	if flat.initTelemetry {
		memSink, err := lib.InitTelemetry(config.Telemetry)
		if err != nil {
			return nil, fmt.Errorf("Failed to initialize telemetry: %w", err)
		}
		a.MemSink = memSink
	}

	// TODO (autoconf) figure out how to let this setting be pushed down via autoconf
	// right now it gets defaulted if unset so this check actually doesn't do much
	// for a normal running agent.
	if a.config.Datacenter == "" {
		return nil, fmt.Errorf("Must configure a Datacenter")
	}
	if a.config.DataDir == "" && !a.config.DevMode {
		return nil, fmt.Errorf("Must configure a DataDir")
	}

	tlsConfigurator, err := tlsutil.NewConfigurator(a.config.ToTLSUtilConfig(), a.logger)
	if err != nil {
		return nil, err
	}
	a.tlsConfigurator = tlsConfigurator

	err = a.initializeConnectionPool()
	if err != nil {
		return nil, fmt.Errorf("Failed to initialize the connection pool: %w", err)
	}

	a.serviceManager = NewServiceManager(&a)

	if err := a.initializeACLs(); err != nil {
		return nil, err
	}

	// Retrieve or generate the node ID before setting up the rest of the
	// agent, which depends on it.
	if err := a.setupNodeID(a.config); err != nil {
		return nil, fmt.Errorf("Failed to setup node ID: %v", err)
	}

	// Build the AutoConfig object; Start later uses it to resolve the
	// final runtime configuration.
	acOpts := []autoconf.Option{
		autoconf.WithDirectRPC(a.connPool),
		autoconf.WithTLSConfigurator(a.tlsConfigurator),
		autoconf.WithBuilderOpts(flat.builderOpts),
		autoconf.WithLogger(a.logger),
		autoconf.WithOverrides(flat.overrides...),
	}
	ac, err := autoconf.New(acOpts...)
	if err != nil {
		return nil, err
	}
	a.autoConf = ac

	return &a, nil
}
2016-12-02 05:35:38 +00:00
2020-06-10 20:47:35 +00:00
// GetLogger retrieves the agent's logger.
// TODO: export the logger field and get rid of this method.
// This is here for now to simplify the work I am doing and make
// reviewing the final PR easier.
func (a *Agent) GetLogger() hclog.InterceptLogger {
	logger := a.logger
	return logger
}
// GetConfig retrieves the agent's config under the state lock, since the
// config pointer may be replaced during Start.
// TODO: export the config field and get rid of this method.
// This is here for now to simplify the work I am doing and make
// reviewing the final PR easier.
func (a *Agent) GetConfig() *config.RuntimeConfig {
	a.stateLock.Lock()
	cfg := a.config
	a.stateLock.Unlock()
	return cfg
}
2020-06-10 20:15:32 +00:00
// initializeConnectionPool builds the shared RPC connection pool from the
// agent's current configuration and stores it on a.connPool.
func (a *Agent) initializeConnectionPool() error {
	// Bind outgoing RPCs to the configured address unless it is a wildcard.
	var srcAddr *net.TCPAddr
	if !ipaddr.IsAny(a.config.RPCBindAddr) {
		srcAddr = &net.TCPAddr{IP: a.config.RPCBindAddr.IP}
	}

	// Ensure we have a log output for the connection pool.
	out := a.LogOutput
	if out == nil {
		out = os.Stderr
	}

	// Servers keep connections around longer and multiplex more streams
	// than clients.
	maxTime, maxStreams := 127*time.Second, 32
	if a.config.ServerMode {
		maxTime, maxStreams = 2*time.Minute, 64
	}

	a.connPool = &pool.ConnPool{
		Server:          a.config.ServerMode,
		SrcAddr:         srcAddr,
		LogOutput:       out,
		TLSConfigurator: a.tlsConfigurator,
		Datacenter:      a.config.Datacenter,
		MaxTime:         maxTime,
		MaxStreams:      maxStreams,
	}
	return nil
}
2020-01-27 19:54:32 +00:00
// LocalConfig takes a config.RuntimeConfig and maps the fields to a local.Config
2017-08-28 12:17:13 +00:00
func LocalConfig ( cfg * config . RuntimeConfig ) local . Config {
lc := local . Config {
AdvertiseAddr : cfg . AdvertiseAddrLAN . String ( ) ,
CheckUpdateInterval : cfg . CheckUpdateInterval ,
Datacenter : cfg . Datacenter ,
DiscardCheckOutput : cfg . DiscardCheckOutput ,
NodeID : cfg . NodeID ,
NodeName : cfg . NodeName ,
TaggedAddresses : map [ string ] string { } ,
}
for k , v := range cfg . TaggedAddresses {
lc . TaggedAddresses [ k ] = v
}
return lc
}
2020-01-27 19:54:32 +00:00
// Start verifies its configuration and runs an agent's various subprocesses.
2020-06-19 19:16:00 +00:00
func ( a * Agent ) Start ( ctx context . Context ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
2020-06-10 20:47:35 +00:00
// This needs to be done early on as it will potentially alter the configuration
// and then how other bits are brought up
2020-06-19 19:16:00 +00:00
c , err := a . autoConf . InitialConfiguration ( ctx )
2020-06-10 20:47:35 +00:00
if err != nil {
return err
}
// copy over the existing node id, this cannot be
// changed while running anyways but this prevents
// breaking some existing behavior. then overwrite
// the configuration
c . NodeID = a . config . NodeID
a . config = c
if err := a . tlsConfigurator . Update ( a . config . ToTLSUtilConfig ( ) ) ; err != nil {
return fmt . Errorf ( "Failed to load TLS configurations after applying auto-config settings: %w" , err )
}
2017-05-19 09:53:41 +00:00
2020-04-02 07:59:23 +00:00
if err := a . CheckSecurity ( c ) ; err != nil {
a . logger . Error ( "Security error while parsing configuration: %#v" , err )
return err
}
2018-03-27 19:00:33 +00:00
// Warn if the node name is incompatible with DNS
if InvalidDnsRe . MatchString ( a . config . NodeName ) {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Node name will not be discoverable " +
2018-03-27 19:00:33 +00:00
"via DNS due to invalid characters. Valid characters include " +
2020-01-28 23:50:41 +00:00
"all alpha-numerics and dashes." ,
"node_name" , a . config . NodeName ,
)
2018-03-27 20:31:27 +00:00
} else if len ( a . config . NodeName ) > MaxDNSLabelLength {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Node name will not be discoverable " +
2018-03-27 19:00:33 +00:00
"via DNS due to it being too long. Valid lengths are between " +
2020-01-28 23:50:41 +00:00
"1 and 63 bytes." ,
"node_name" , a . config . NodeName ,
)
2018-03-27 19:00:33 +00:00
}
2018-03-27 20:31:27 +00:00
2019-02-27 19:28:31 +00:00
// load the tokens - this requires the logger to be setup
// which is why we can't do this in New
a . loadTokens ( a . config )
2020-02-04 20:58:56 +00:00
a . loadEnterpriseTokens ( a . config )
2019-02-27 19:28:31 +00:00
2017-06-29 12:35:55 +00:00
// create the local state
2017-08-30 10:25:49 +00:00
a . State = local . NewState ( LocalConfig ( c ) , a . logger , a . tokens )
2017-08-28 12:17:09 +00:00
// create the state synchronization manager which performs
// regular and on-demand state synchronizations (anti-entropy).
2017-10-19 09:20:24 +00:00
a . sync = ae . NewStateSyncer ( a . State , c . AEInterval , a . shutdownCh , a . logger )
2017-06-29 12:35:55 +00:00
2018-04-22 21:00:32 +00:00
// create the cache
2018-04-11 08:52:51 +00:00
a . cache = cache . New ( nil )
2017-06-29 12:35:55 +00:00
// create the config for the rpc server/client
consulCfg , err := a . consulConfig ( )
if err != nil {
return err
}
2017-08-28 12:17:09 +00:00
// ServerUp is used to inform that a new consul server is now
// up. This can be used to speed up the sync process if we are blocking
// waiting to discover a consul server
2017-08-30 10:25:49 +00:00
consulCfg . ServerUp = a . sync . SyncFull . Trigger
2017-06-29 12:35:55 +00:00
2020-04-16 22:07:52 +00:00
err = a . initEnterprise ( consulCfg )
if err != nil {
return fmt . Errorf ( "failed to start Consul enterprise component: %v" , err )
}
2019-12-06 20:35:58 +00:00
2020-06-10 20:15:32 +00:00
options := [ ] consul . ConsulOption {
consul . WithLogger ( a . logger ) ,
consul . WithTokenStore ( a . tokens ) ,
consul . WithTLSConfigurator ( a . tlsConfigurator ) ,
consul . WithConnectionPool ( a . connPool ) ,
2019-03-13 09:29:06 +00:00
}
2019-02-26 15:52:07 +00:00
2016-08-16 07:05:55 +00:00
// Setup either the client or the server.
2017-09-25 18:40:42 +00:00
if c . ServerMode {
2020-06-10 20:15:32 +00:00
server , err := consul . NewServerWithOptions ( consulCfg , options ... )
2017-05-19 09:53:41 +00:00
if err != nil {
2017-06-29 12:35:55 +00:00
return fmt . Errorf ( "Failed to start Consul server: %v" , err )
2017-05-19 09:53:41 +00:00
}
a . delegate = server
2013-12-20 23:33:13 +00:00
} else {
2020-06-10 20:15:32 +00:00
client , err := consul . NewClientWithOptions ( consulCfg , options ... )
2017-05-19 09:53:41 +00:00
if err != nil {
2017-06-29 12:35:55 +00:00
return fmt . Errorf ( "Failed to start Consul client: %v" , err )
2017-05-19 09:53:41 +00:00
}
a . delegate = client
2013-12-20 23:33:13 +00:00
}
2017-08-30 10:25:49 +00:00
// the staggering of the state syncing depends on the cluster size.
a . sync . ClusterSize = func ( ) int { return len ( a . delegate . LANMembers ( ) ) }
// link the state with the consul server/client and the state syncer
// via callbacks. After several attempts this was easier than using
// channels since the event notification needs to be non-blocking
// and that should be hidden in the state syncer implementation.
a . State . Delegate = a . delegate
a . State . TriggerSyncChanges = a . sync . SyncChanges . Trigger
2018-04-22 21:00:32 +00:00
// Register the cache. We do this much later so the delegate is
// populated from above.
a . registerCache ( )
2019-06-27 20:22:07 +00:00
if a . config . AutoEncryptTLS && ! a . config . ServerMode {
2020-06-19 19:16:00 +00:00
reply , err := a . setupClientAutoEncrypt ( ctx )
2019-06-27 20:22:07 +00:00
if err != nil {
return fmt . Errorf ( "AutoEncrypt failed: %s" , err )
}
rootsReq , leafReq , err := a . setupClientAutoEncryptCache ( reply )
if err != nil {
return fmt . Errorf ( "AutoEncrypt failed: %s" , err )
}
if err = a . setupClientAutoEncryptWatching ( rootsReq , leafReq ) ; err != nil {
return fmt . Errorf ( "AutoEncrypt failed: %s" , err )
}
2020-01-28 23:50:41 +00:00
a . logger . Info ( "automatically upgraded to TLS" )
2019-06-27 20:22:07 +00:00
}
2019-09-24 15:04:48 +00:00
a . serviceManager . Start ( )
2017-01-05 22:10:26 +00:00
// Load checks/services/metadata.
2020-03-09 11:59:41 +00:00
if err := a . loadServices ( c , nil ) ; err != nil {
2017-05-19 09:53:41 +00:00
return err
2014-11-24 08:36:03 +00:00
}
2019-07-17 19:06:50 +00:00
if err := a . loadChecks ( c , nil ) ; err != nil {
2017-05-19 09:53:41 +00:00
return err
2014-11-24 08:36:03 +00:00
}
2017-05-19 09:53:41 +00:00
if err := a . loadMetadata ( c ) ; err != nil {
return err
2017-01-05 22:10:26 +00:00
}
2014-11-24 08:36:03 +00:00
2018-10-03 19:37:53 +00:00
// Start the proxy config manager.
a . proxyConfig , err = proxycfg . NewManager ( proxycfg . ManagerConfig {
Cache : a . cache ,
2020-01-28 23:50:41 +00:00
Logger : a . logger . Named ( logging . ProxyConfig ) ,
2018-10-03 19:37:53 +00:00
State : a . State ,
Source : & structs . QuerySource {
Node : a . config . NodeName ,
Datacenter : a . config . Datacenter ,
Segment : a . config . SegmentName ,
} ,
2020-04-27 23:36:20 +00:00
DNSConfig : proxycfg . DNSConfig {
Domain : a . config . DNSDomain ,
AltDomain : a . config . DNSAltDomain ,
} ,
2020-03-09 20:59:02 +00:00
TLSConfigurator : a . tlsConfigurator ,
2018-10-03 19:37:53 +00:00
} )
if err != nil {
return err
}
2018-10-04 13:08:12 +00:00
go func ( ) {
if err := a . proxyConfig . Run ( ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "proxy config manager exited with error" , "error" , err )
2018-10-04 13:08:12 +00:00
}
} ( )
2018-10-03 19:37:53 +00:00
2016-08-16 07:05:55 +00:00
// Start watching for critical services to deregister, based on their
// checks.
2017-05-19 09:53:41 +00:00
go a . reapServices ( )
2016-08-16 07:05:55 +00:00
// Start handling events.
2017-05-19 09:53:41 +00:00
go a . handleEvents ( )
2014-08-27 23:49:12 +00:00
2015-06-06 03:31:33 +00:00
// Start sending network coordinate to the server.
2017-05-19 09:53:41 +00:00
if ! c . DisableCoordinates {
go a . sendCoordinate ( )
2015-06-06 03:31:33 +00:00
}
2016-08-16 07:05:55 +00:00
// Write out the PID file if necessary.
2017-05-19 09:53:41 +00:00
if err := a . storePid ( ) ; err != nil {
return err
2014-05-06 16:57:53 +00:00
}
2014-05-06 03:29:50 +00:00
2017-05-24 13:22:56 +00:00
// start DNS servers
if err := a . listenAndServeDNS ( ) ; err != nil {
return err
}
2020-01-31 16:19:37 +00:00
// Configure the http connection limiter.
a . httpConnLimiter . SetConfig ( connlimit . Config {
MaxConnsPerClientIP : a . config . HTTPMaxConnsPerClient ,
} )
2017-11-07 23:06:59 +00:00
// Create listeners and unstarted servers; see comment on listenHTTP why
// we are doing this.
servers , err := a . listenHTTP ( )
2017-05-24 13:22:56 +00:00
if err != nil {
return err
}
2017-11-07 23:06:59 +00:00
// Start HTTP and HTTPS servers.
for _ , srv := range servers {
if err := a . serveHTTP ( srv ) ; err != nil {
2017-05-24 13:22:56 +00:00
return err
}
a . httpServers = append ( a . httpServers , srv )
}
2017-06-02 09:55:29 +00:00
2018-10-03 19:37:53 +00:00
// Start gRPC server.
if err := a . listenAndServeGRPC ( ) ; err != nil {
return err
}
2017-06-09 08:03:49 +00:00
// register watches
2017-06-24 19:52:41 +00:00
if err := a . reloadWatches ( a . config ) ; err != nil {
2017-06-09 08:03:49 +00:00
return err
}
2017-06-02 09:55:29 +00:00
// start retry join
2017-08-19 08:44:19 +00:00
go a . retryJoinLAN ( )
2020-03-09 20:59:02 +00:00
if a . config . ServerMode {
go a . retryJoinWAN ( )
}
2017-06-02 09:55:29 +00:00
2017-05-24 13:22:56 +00:00
return nil
}
2020-06-19 19:16:00 +00:00
func ( a * Agent ) setupClientAutoEncrypt ( ctx context . Context ) ( * structs . SignedResponse , error ) {
2019-06-27 20:22:07 +00:00
client := a . delegate . ( * consul . Client )
addrs := a . config . StartJoinAddrsLAN
disco , err := newDiscover ( )
if err != nil && len ( addrs ) == 0 {
return nil , err
}
2020-03-09 20:59:02 +00:00
addrs = append ( addrs , retryJoinAddrs ( disco , retryJoinSerfVariant , "LAN" , a . config . RetryJoinLAN , a . logger ) ... )
2019-06-27 20:22:07 +00:00
2020-06-19 19:16:00 +00:00
reply , priv , err := client . RequestAutoEncryptCerts ( ctx , addrs , a . config . ServerPort , a . tokens . AgentToken ( ) )
2019-06-27 20:22:07 +00:00
if err != nil {
return nil , err
}
connectCAPems := [ ] string { }
for _ , ca := range reply . ConnectCARoots . Roots {
connectCAPems = append ( connectCAPems , ca . RootCert )
}
if err := a . tlsConfigurator . UpdateAutoEncrypt ( reply . ManualCARoots , connectCAPems , reply . IssuedCert . CertPEM , priv , reply . VerifyServerHostname ) ; err != nil {
return nil , err
}
return reply , nil
}
func ( a * Agent ) setupClientAutoEncryptCache ( reply * structs . SignedResponse ) ( * structs . DCSpecificRequest , * cachetype . ConnectCALeafRequest , error ) {
rootsReq := & structs . DCSpecificRequest {
Datacenter : a . config . Datacenter ,
QueryOptions : structs . QueryOptions { Token : a . tokens . AgentToken ( ) } ,
}
2019-08-27 21:45:58 +00:00
// prepolutate roots cache
2019-06-27 20:22:07 +00:00
rootRes := cache . FetchResult { Value : & reply . ConnectCARoots , Index : reply . ConnectCARoots . QueryMeta . Index }
if err := a . cache . Prepopulate ( cachetype . ConnectCARootName , rootRes , a . config . Datacenter , a . tokens . AgentToken ( ) , rootsReq . CacheInfo ( ) . Key ) ; err != nil {
return nil , nil , err
}
leafReq := & cachetype . ConnectCALeafRequest {
Datacenter : a . config . Datacenter ,
Token : a . tokens . AgentToken ( ) ,
Agent : a . config . NodeName ,
2020-01-17 22:25:26 +00:00
DNSSAN : a . config . AutoEncryptDNSSAN ,
IPSAN : a . config . AutoEncryptIPSAN ,
2019-06-27 20:22:07 +00:00
}
2019-08-27 21:45:58 +00:00
// prepolutate leaf cache
2019-06-27 20:22:07 +00:00
certRes := cache . FetchResult { Value : & reply . IssuedCert , Index : reply . ConnectCARoots . QueryMeta . Index }
if err := a . cache . Prepopulate ( cachetype . ConnectCALeafName , certRes , a . config . Datacenter , a . tokens . AgentToken ( ) , leafReq . Key ( ) ) ; err != nil {
return nil , nil , err
}
return rootsReq , leafReq , nil
}
// setupClientAutoEncryptWatching installs cache watches on the CA roots and
// on this agent's leaf certificate so the TLS configurator is refreshed as
// they rotate, and starts a background safety net that re-runs the full
// auto_encrypt handshake if the leaf certificate expires anyway.
func (a *Agent) setupClientAutoEncryptWatching(rootsReq *structs.DCSpecificRequest, leafReq *cachetype.ConnectCALeafRequest) error {
	// setup watches; buffered so slow handling does not block the cache
	ch := make(chan cache.UpdateEvent, 10)
	ctx, cancel := context.WithCancel(context.Background())

	// Watch for root changes
	err := a.cache.Notify(ctx, cachetype.ConnectCARootName, rootsReq, rootsWatchID, ch)
	if err != nil {
		cancel()
		return err
	}

	// Watch the leaf cert
	err = a.cache.Notify(ctx, cachetype.ConnectCALeafName, leafReq, leafWatchID, ch)
	if err != nil {
		cancel()
		return err
	}

	// Setup actions in case the watches are firing. Runs until either the
	// agent shuts down (which also cancels the watch context) or the
	// context is cancelled externally.
	go func() {
		for {
			select {
			case <-a.shutdownCh:
				cancel()
				return
			case <-ctx.Done():
				return
			case u := <-ch:
				switch u.CorrelationID {
				case rootsWatchID:
					roots, ok := u.Result.(*structs.IndexedCARoots)
					if !ok {
						// Log and keep watching; a bad payload must not kill the watcher.
						err := fmt.Errorf("invalid type for roots response: %T", u.Result)
						a.logger.Error("watch error for correlation id",
							"correlation_id", u.CorrelationID,
							"error", err,
						)
						continue
					}
					pems := []string{}
					for _, root := range roots.Roots {
						pems = append(pems, root.RootCert)
					}
					a.tlsConfigurator.UpdateAutoEncryptCA(pems)
				case leafWatchID:
					leaf, ok := u.Result.(*structs.IssuedCert)
					if !ok {
						err := fmt.Errorf("invalid type for leaf response: %T", u.Result)
						a.logger.Error("watch error for correlation id",
							"correlation_id", u.CorrelationID,
							"error", err,
						)
						continue
					}
					a.tlsConfigurator.UpdateAutoEncryptCert(leaf.CertPEM, leaf.PrivateKeyPEM)
				}
			}
		}
	}()

	// Setup safety net in case the auto_encrypt cert doesn't get renewed
	// in time. The agent would be stuck in that case because the watches
	// never use the AutoEncrypt.Sign endpoint.
	go func() {
		// Check 10sec after cert expires. The agent cache
		// should be handling the expiration and renew before
		// it.
		// If there is no cert, AutoEncryptCertNotAfter returns
		// a value in the past which immediately triggers the
		// renew, but this case shouldn't happen because at
		// this point, auto_encrypt was just being setup
		// successfully.
		// NOTE(review): this expression computes NotAfter - (now + 10s),
		// i.e. it fires 10 seconds *before* expiry, not after as the
		// comment above says — confirm which is intended.
		interval := a.tlsConfigurator.AutoEncryptCertNotAfter().Sub(time.Now().Add(10 * time.Second))
		autoLogger := a.logger.Named(logging.AutoEncrypt)
		for {
			a.logger.Debug("setting up client certificate expiration check on interval", "interval", interval)
			select {
			case <-a.shutdownCh:
				return
			case <-time.After(interval):
				// check auto encrypt client cert expiration
				if a.tlsConfigurator.AutoEncryptCertExpired() {
					autoLogger.Debug("client certificate expired.")
					// Background because the context is mainly useful when the agent is first starting up.
					reply, err := a.setupClientAutoEncrypt(context.Background())
					if err != nil {
						autoLogger.Error("client certificate expired, failed to renew", "error", err)
						// in case of an error, try again in one minute
						interval = time.Minute
						continue
					}
					_, _, err = a.setupClientAutoEncryptCache(reply)
					if err != nil {
						autoLogger.Error("client certificate expired, failed to populate cache", "error", err)
						// in case of an error, try again in one minute
						interval = time.Minute
						continue
					}
				}
			}
		}
	}()
	return nil
}
2018-10-03 19:37:53 +00:00
func ( a * Agent ) listenAndServeGRPC ( ) error {
if len ( a . config . GRPCAddrs ) < 1 {
return nil
}
2020-03-21 18:59:39 +00:00
xdsServer := & xds . Server {
2018-10-19 16:04:07 +00:00
Logger : a . logger ,
CfgMgr : a . proxyConfig ,
Authz : a ,
ResolveToken : a . resolveToken ,
2019-09-26 02:55:52 +00:00
CheckFetcher : a ,
CfgFetcher : a ,
2018-10-03 19:37:53 +00:00
}
2020-03-21 18:59:39 +00:00
xdsServer . Initialize ( )
2019-01-11 15:43:18 +00:00
2018-10-03 19:37:53 +00:00
var err error
2019-02-13 17:49:54 +00:00
if a . config . HTTPSPort > 0 {
// gRPC uses the same TLS settings as the HTTPS API. If HTTPS is
// enabled then gRPC will require HTTPS as well.
2020-03-21 18:59:39 +00:00
a . grpcServer , err = xdsServer . GRPCServer ( a . tlsConfigurator )
2019-02-13 17:49:54 +00:00
} else {
2020-03-21 18:59:39 +00:00
a . grpcServer , err = xdsServer . GRPCServer ( nil )
2019-02-13 17:49:54 +00:00
}
2018-10-03 19:37:53 +00:00
if err != nil {
return err
}
ln , err := a . startListeners ( a . config . GRPCAddrs )
if err != nil {
return err
}
for _ , l := range ln {
go func ( innerL net . Listener ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Started gRPC server" ,
"address" , innerL . Addr ( ) . String ( ) ,
"network" , innerL . Addr ( ) . Network ( ) ,
)
2018-10-03 19:37:53 +00:00
err := a . grpcServer . Serve ( innerL )
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "gRPC server failed" , "error" , err )
2018-10-03 19:37:53 +00:00
}
} ( l )
}
return nil
}
2017-05-24 13:22:56 +00:00
// listenAndServeDNS starts one DNS server per configured DNS address and
// waits (up to one second) for every server to report startup. Startup
// failures are accumulated into a multierror; nil is returned when all
// servers came up cleanly.
func (a *Agent) listenAndServeDNS() error {
	// Both channels are buffered to the number of servers so the server
	// goroutines can report without blocking.
	notif := make(chan net.Addr, len(a.config.DNSAddrs))
	errCh := make(chan error, len(a.config.DNSAddrs))
	for _, addr := range a.config.DNSAddrs {
		// create server
		s, err := NewDNSServer(a)
		if err != nil {
			return err
		}
		a.dnsServers = append(a.dnsServers, s)

		// start server; the address is passed as an argument so the
		// goroutine does not capture the loop variable.
		a.wgServers.Add(1)
		go func(addr net.Addr) {
			defer a.wgServers.Done()
			err := s.ListenAndServe(addr.Network(), addr.String(), func() { notif <- addr })
			// "accept" errors are expected when the listener is closed on
			// shutdown, so they are not reported as startup failures.
			if err != nil && !strings.Contains(err.Error(), "accept") {
				errCh <- err
			}
		}(addr)
	}

	// wait for servers to be up
	timeout := time.After(time.Second)
	var merr *multierror.Error
	for range a.config.DNSAddrs {
		select {
		case addr := <-notif:
			a.logger.Info("Started DNS server",
				"address", addr.String(),
				"network", addr.Network(),
			)

		case err := <-errCh:
			merr = multierror.Append(merr, err)
		case <-timeout:
			// Stop waiting entirely on timeout; remaining servers may still
			// come up later but startup is reported as failed.
			merr = multierror.Append(merr, fmt.Errorf("agent: timeout starting DNS servers"))
			return merr.ErrorOrNil()
		}
	}
	return merr.ErrorOrNil()
}
2018-10-03 19:37:53 +00:00
// startListeners opens one listener per address. TCP addresses are wrapped
// in tcpKeepAliveListener so accepted connections get keep-alive probes;
// unix addresses are bound via listenSocket. The first failure aborts and
// is returned; listeners opened so far are left for the caller to clean up.
func (a *Agent) startListeners(addrs []net.Addr) ([]net.Listener, error) {
	listeners := make([]net.Listener, 0, len(addrs))
	for _, addr := range addrs {
		switch x := addr.(type) {
		case *net.UnixAddr:
			l, err := a.listenSocket(x.Name)
			if err != nil {
				return nil, err
			}
			listeners = append(listeners, l)
		case *net.TCPAddr:
			l, err := net.Listen("tcp", x.String())
			if err != nil {
				return nil, err
			}
			listeners = append(listeners, &tcpKeepAliveListener{l.(*net.TCPListener)})
		default:
			return nil, fmt.Errorf("unsupported address type %T", addr)
		}
	}
	return listeners, nil
}
2017-05-24 13:22:56 +00:00
// listenHTTP binds listeners to the provided addresses and also returns
// pre-configured HTTP servers which are not yet started. The motivation is
// that in the current startup/shutdown setup we de-couple the listener
// creation from the server startup assuming that if any of the listeners
// cannot be bound we fail immediately and later failures do not occur.
// Therefore, starting a server with a running listener is assumed to not
// produce an error.
//
// The second motivation is that an HTTPS server needs to use the same TLSConfig
// on both the listener and the HTTP server. When listeners and servers are
// created at different times this becomes difficult to handle without keeping
// the TLS configuration somewhere or recreating it.
//
// This approach should ultimately be refactored to the point where we just
// start the server and any error should trigger a proper shutdown of the agent.
func (a *Agent) listenHTTP() ([]*HTTPServer, error) {
	var ln []net.Listener
	var servers []*HTTPServer
	// start binds all addrs for one protocol ("http" or "https"),
	// appending to the shared ln/servers slices above so that a failure in
	// either call site can close everything bound so far.
	start := func(proto string, addrs []net.Addr) error {
		listeners, err := a.startListeners(addrs)
		if err != nil {
			return err
		}

		for _, l := range listeners {
			var tlscfg *tls.Config
			// Only TCP listeners are TLS-wrapped; unix sockets are served
			// in plaintext even under the "https" protocol.
			_, isTCP := l.(*tcpKeepAliveListener)
			if isTCP && proto == "https" {
				tlscfg = a.tlsConfigurator.IncomingHTTPSConfig()
				l = tls.NewListener(l, tlscfg)
			}

			srv := &HTTPServer{
				Server: &http.Server{
					Addr:      l.Addr().String(),
					TLSConfig: tlscfg,
				},
				ln:       l,
				agent:    a,
				denylist: NewDenylist(a.config.HTTPBlockEndpoints),
				proto:    proto,
			}
			srv.Server.Handler = srv.handler(a.config.EnableDebug)

			// Load the connlimit helper into the server
			connLimitFn := a.httpConnLimiter.HTTPConnStateFunc()

			if proto == "https" {
				// Enforce TLS handshake timeout
				srv.Server.ConnState = func(conn net.Conn, state http.ConnState) {
					switch state {
					case http.StateNew:
						// Set deadline to prevent slow send before TLS handshake or first
						// byte of request.
						conn.SetReadDeadline(time.Now().Add(a.config.HTTPSHandshakeTimeout))
					case http.StateActive:
						// Clear read deadline. We should maybe set read timeouts more
						// generally but that's a bigger task as some HTTP endpoints may
						// stream large requests and responses (e.g. snapshot) so we can't
						// set sensible blanket timeouts here.
						conn.SetReadDeadline(time.Time{})
					}
					// Pass through to conn limit. This is OK because we didn't change
					// state (i.e. Close conn).
					connLimitFn(conn, state)
				}

				// This will enable upgrading connections to HTTP/2 as
				// part of TLS negotiation.
				err = http2.ConfigureServer(srv.Server, nil)
				if err != nil {
					return err
				}
			} else {
				srv.Server.ConnState = connLimitFn
			}

			ln = append(ln, l)
			servers = append(servers, srv)
		}
		return nil
	}

	// Bind plain HTTP first, then HTTPS; on any failure close every
	// listener bound so far (from both calls) before returning.
	if err := start("http", a.config.HTTPAddrs); err != nil {
		for _, l := range ln {
			l.Close()
		}
		return nil, err
	}
	if err := start("https", a.config.HTTPSAddrs); err != nil {
		for _, l := range ln {
			l.Close()
		}
		return nil, err
	}
	return servers, nil
}
2017-05-19 09:53:41 +00:00
2017-05-30 23:05:21 +00:00
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used so dead TCP connections eventually go away.
// It mirrors the wrapper net/http historically used internally for the
// same purpose.
type tcpKeepAliveListener struct {
	*net.TCPListener
}
// Accept waits for the next TCP connection and enables keep-alive probes
// on it with a 30 second period before handing it to the caller.
func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
	conn, err := ln.AcceptTCP()
	if err != nil {
		return nil, err
	}
	// Keep-alive setup errors are deliberately ignored; the connection is
	// still usable without keep-alives.
	conn.SetKeepAlive(true)
	conn.SetKeepAlivePeriod(30 * time.Second)
	return conn, nil
}
2017-09-25 18:40:42 +00:00
func ( a * Agent ) listenSocket ( path string ) ( net . Listener , error ) {
2017-05-24 13:22:56 +00:00
if _ , err := os . Stat ( path ) ; ! os . IsNotExist ( err ) {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Replacing socket" , "path" , path )
2017-05-24 13:22:56 +00:00
}
if err := os . Remove ( path ) ; err != nil && ! os . IsNotExist ( err ) {
return nil , fmt . Errorf ( "error removing socket file: %s" , err )
}
l , err := net . Listen ( "unix" , path )
if err != nil {
return nil , err
}
2017-09-25 18:40:42 +00:00
user , group , mode := a . config . UnixSocketUser , a . config . UnixSocketGroup , a . config . UnixSocketMode
if err := setFilePermissions ( path , user , group , mode ) ; err != nil {
return nil , fmt . Errorf ( "Failed setting up socket: %s" , err )
2017-05-24 13:22:56 +00:00
}
return l , nil
}
2017-11-07 23:06:59 +00:00
func ( a * Agent ) serveHTTP ( srv * HTTPServer ) error {
2017-05-19 09:53:41 +00:00
// https://github.com/golang/go/issues/20239
//
2017-05-24 13:22:56 +00:00
// In go.8.1 there is a race between Serve and Shutdown. If
2017-05-19 09:53:41 +00:00
// Shutdown is called before the Serve go routine was scheduled then
// the Serve go routine never returns. This deadlocks the agent
// shutdown for some tests since it will wait forever.
2017-09-25 18:40:42 +00:00
notif := make ( chan net . Addr )
2017-05-24 13:22:56 +00:00
a . wgServers . Add ( 1 )
go func ( ) {
defer a . wgServers . Done ( )
2017-11-07 23:06:59 +00:00
notif <- srv . ln . Addr ( )
err := srv . Serve ( srv . ln )
2017-05-24 13:22:56 +00:00
if err != nil && err != http . ErrServerClosed {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "error closing server" , "error" , err )
2017-05-24 13:22:56 +00:00
}
} ( )
2017-05-19 09:53:41 +00:00
2017-05-24 13:22:56 +00:00
select {
case addr := <- notif :
if srv . proto == "https" {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Started HTTPS server" ,
"address" , addr . String ( ) ,
"network" , addr . Network ( ) ,
)
2017-05-24 13:22:56 +00:00
} else {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Started HTTP server" ,
"address" , addr . String ( ) ,
"network" , addr . Network ( ) ,
)
2017-05-24 13:22:56 +00:00
}
return nil
case <- time . After ( time . Second ) :
return fmt . Errorf ( "agent: timeout starting HTTP servers" )
2017-05-19 09:53:41 +00:00
}
2013-12-20 01:14:46 +00:00
}
2020-05-26 08:01:49 +00:00
// stopAllWatches stops every currently running watch plan. It does not
// clear a.watchPlans; callers reset that slice themselves.
func (a *Agent) stopAllWatches() {
	for _, plan := range a.watchPlans {
		plan.Stop()
	}
}
2017-06-24 19:52:41 +00:00
// reloadWatches stops any existing watch plans and attempts to load the given
// set of watches. Validation failures return an error before any new watch
// is started; per-watch runtime failures are only logged.
func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
	// Stop the current watches.
	a.stopAllWatches()
	a.watchPlans = nil

	// Return if there are no watches now.
	if len(cfg.Watches) == 0 {
		return nil
	}

	// Watches use the API to talk to this agent, so that must be enabled.
	if len(cfg.HTTPAddrs) == 0 && len(cfg.HTTPSAddrs) == 0 {
		return fmt.Errorf("watch plans require an HTTP or HTTPS endpoint")
	}

	// Compile the watches
	var watchPlans []*watch.Plan
	for _, params := range cfg.Watches {
		// Default to the script handler type when none is given.
		if handlerType, ok := params["handler_type"]; !ok {
			params["handler_type"] = "script"
		} else if handlerType != "http" && handlerType != "script" {
			return fmt.Errorf("Handler type '%s' not recognized", params["handler_type"])
		}

		// Don't let people use connect watches via this mechanism for now as it
		// needs thought about how to do securely and shouldn't be necessary. Note
		// that if the type assertion fails and the type is not a string then
		// ParseExempt below will error so we don't need to handle that case.
		if typ, ok := params["type"].(string); ok {
			if strings.HasPrefix(typ, "connect_") {
				return fmt.Errorf("Watch type %s is not allowed in agent config", typ)
			}
		}

		// Parse the watches, excluding 'handler' and 'args'
		wp, err := watch.ParseExempt(params, []string{"handler", "args"})
		if err != nil {
			return fmt.Errorf("Failed to parse watch (%#v): %v", params, err)
		}

		// Get the handler and subprocess arguments
		handler, hasHandler := wp.Exempt["handler"]
		args, hasArgs := wp.Exempt["args"]
		if hasHandler {
			a.logger.Warn("The 'handler' field in watches has been deprecated " +
				"and replaced with the 'args' field. See https://www.consul.io/docs/agent/watches.html")
		}
		if _, ok := handler.(string); hasHandler && !ok {
			return fmt.Errorf("Watch handler must be a string")
		}
		// Normalize args to []string; note the else-if reuses ok from the
		// assertion above, so a non-list args value is rejected here.
		if raw, ok := args.([]interface{}); hasArgs && ok {
			var parsed []string
			for _, arg := range raw {
				v, ok := arg.(string)
				if !ok {
					return fmt.Errorf("Watch args must be a list of strings")
				}
				parsed = append(parsed, v)
			}
			wp.Exempt["args"] = parsed
		} else if hasArgs && !ok {
			return fmt.Errorf("Watch args must be a list of strings")
		}
		// Exactly one handler mechanism (handler, args, or http) may be used.
		if hasHandler && hasArgs || hasHandler && wp.HandlerType == "http" || hasArgs && wp.HandlerType == "http" {
			return fmt.Errorf("Only one watch handler allowed")
		}
		if !hasHandler && !hasArgs && wp.HandlerType != "http" {
			return fmt.Errorf("Must define a watch handler")
		}

		// Store the watch plan
		watchPlans = append(watchPlans, wp)
	}

	// Fire off a goroutine for each new watch plan.
	for _, wp := range watchPlans {
		config, err := a.config.APIConfig(true)
		if err != nil {
			a.logger.Error("Failed to run watch", "error", err)
			continue
		}
		a.watchPlans = append(a.watchPlans, wp)
		go func(wp *watch.Plan) {
			// Pick the handler implementation matching how the watch was
			// configured: script (deprecated 'handler' or 'args') or HTTP.
			if h, ok := wp.Exempt["handler"]; ok {
				wp.Handler = makeWatchHandler(a.logger, h)
			} else if h, ok := wp.Exempt["args"]; ok {
				wp.Handler = makeWatchHandler(a.logger, h)
			} else {
				httpConfig := wp.Exempt["http_handler_config"].(*watch.HttpHandlerConfig)
				wp.Handler = makeHTTPWatchHandler(a.logger, httpConfig)
			}
			wp.LogOutput = a.LogOutput

			addr := config.Address
			if config.Scheme == "https" {
				addr = "https://" + addr
			}
			if err := wp.RunWithConfig(addr, config); err != nil {
				a.logger.Error("Failed to run watch", "error", err)
			}
		}(wp)
	}
	return nil
}
2013-12-20 23:33:13 +00:00
// consulConfig is used to return a consul configuration
2017-05-03 21:47:25 +00:00
func ( a * Agent ) consulConfig ( ) ( * consul . Config , error ) {
2013-12-20 23:33:13 +00:00
// Start with the provided config or default config
2017-05-03 19:12:30 +00:00
base := consul . DefaultConfig ( )
2017-06-30 09:09:52 +00:00
2017-01-18 06:20:11 +00:00
// This is set when the agent starts up
base . NodeID = a . config . NodeID
2015-11-29 04:40:05 +00:00
// Apply dev mode
base . DevMode = a . config . DevMode
2013-12-20 23:33:13 +00:00
// Override with our config
2017-09-25 18:40:42 +00:00
// todo(fs): these are now always set in the runtime config so we can simplify this
// todo(fs): or is there a reason to keep it like that?
base . Datacenter = a . config . Datacenter
2018-10-15 16:17:48 +00:00
base . PrimaryDatacenter = a . config . PrimaryDatacenter
2017-09-25 18:40:42 +00:00
base . DataDir = a . config . DataDir
base . NodeName = a . config . NodeName
base . CoordinateUpdateBatchSize = a . config . ConsulCoordinateUpdateBatchSize
base . CoordinateUpdateMaxBatches = a . config . ConsulCoordinateUpdateMaxBatches
base . CoordinateUpdatePeriod = a . config . ConsulCoordinateUpdatePeriod
2019-06-26 15:43:25 +00:00
base . CheckOutputMaxSize = a . config . CheckOutputMaxSize
2017-09-25 18:40:42 +00:00
base . RaftConfig . HeartbeatTimeout = a . config . ConsulRaftHeartbeatTimeout
base . RaftConfig . LeaderLeaseTimeout = a . config . ConsulRaftLeaderLeaseTimeout
base . RaftConfig . ElectionTimeout = a . config . ConsulRaftElectionTimeout
base . SerfLANConfig . MemberlistConfig . BindAddr = a . config . SerfBindAddrLAN . IP . String ( )
2017-09-29 14:12:04 +00:00
base . SerfLANConfig . MemberlistConfig . BindPort = a . config . SerfBindAddrLAN . Port
2020-05-20 09:31:19 +00:00
base . SerfLANConfig . MemberlistConfig . CIDRsAllowed = a . config . SerfAllowedCIDRsLAN
base . SerfWANConfig . MemberlistConfig . CIDRsAllowed = a . config . SerfAllowedCIDRsWAN
2017-09-25 18:40:42 +00:00
base . SerfLANConfig . MemberlistConfig . AdvertiseAddr = a . config . SerfAdvertiseAddrLAN . IP . String ( )
2017-09-29 14:12:04 +00:00
base . SerfLANConfig . MemberlistConfig . AdvertisePort = a . config . SerfAdvertiseAddrLAN . Port
2017-09-25 18:40:42 +00:00
base . SerfLANConfig . MemberlistConfig . GossipVerifyIncoming = a . config . EncryptVerifyIncoming
base . SerfLANConfig . MemberlistConfig . GossipVerifyOutgoing = a . config . EncryptVerifyOutgoing
2018-07-26 15:39:49 +00:00
base . SerfLANConfig . MemberlistConfig . GossipInterval = a . config . GossipLANGossipInterval
base . SerfLANConfig . MemberlistConfig . GossipNodes = a . config . GossipLANGossipNodes
base . SerfLANConfig . MemberlistConfig . ProbeInterval = a . config . GossipLANProbeInterval
base . SerfLANConfig . MemberlistConfig . ProbeTimeout = a . config . GossipLANProbeTimeout
base . SerfLANConfig . MemberlistConfig . SuspicionMult = a . config . GossipLANSuspicionMult
base . SerfLANConfig . MemberlistConfig . RetransmitMult = a . config . GossipLANRetransmitMult
2018-08-17 18:44:25 +00:00
if a . config . ReconnectTimeoutLAN != 0 {
base . SerfLANConfig . ReconnectTimeout = a . config . ReconnectTimeoutLAN
}
2017-09-25 18:40:42 +00:00
2018-03-26 19:21:06 +00:00
if a . config . SerfBindAddrWAN != nil {
base . SerfWANConfig . MemberlistConfig . BindAddr = a . config . SerfBindAddrWAN . IP . String ( )
base . SerfWANConfig . MemberlistConfig . BindPort = a . config . SerfBindAddrWAN . Port
base . SerfWANConfig . MemberlistConfig . AdvertiseAddr = a . config . SerfAdvertiseAddrWAN . IP . String ( )
base . SerfWANConfig . MemberlistConfig . AdvertisePort = a . config . SerfAdvertiseAddrWAN . Port
base . SerfWANConfig . MemberlistConfig . GossipVerifyIncoming = a . config . EncryptVerifyIncoming
base . SerfWANConfig . MemberlistConfig . GossipVerifyOutgoing = a . config . EncryptVerifyOutgoing
2018-07-26 15:39:49 +00:00
base . SerfWANConfig . MemberlistConfig . GossipInterval = a . config . GossipWANGossipInterval
base . SerfWANConfig . MemberlistConfig . GossipNodes = a . config . GossipWANGossipNodes
base . SerfWANConfig . MemberlistConfig . ProbeInterval = a . config . GossipWANProbeInterval
base . SerfWANConfig . MemberlistConfig . ProbeTimeout = a . config . GossipWANProbeTimeout
base . SerfWANConfig . MemberlistConfig . SuspicionMult = a . config . GossipWANSuspicionMult
base . SerfWANConfig . MemberlistConfig . RetransmitMult = a . config . GossipWANRetransmitMult
2018-08-17 18:44:25 +00:00
if a . config . ReconnectTimeoutWAN != 0 {
base . SerfWANConfig . ReconnectTimeout = a . config . ReconnectTimeoutWAN
}
2018-03-26 19:21:06 +00:00
} else {
// Disable serf WAN federation
base . SerfWANConfig = nil
}
2017-09-25 18:40:42 +00:00
base . RPCAddr = a . config . RPCBindAddr
base . RPCAdvertise = a . config . RPCAdvertiseAddr
base . Segment = a . config . SegmentName
2017-08-29 00:58:22 +00:00
if len ( a . config . Segments ) > 0 {
segments , err := a . segmentConfig ( )
if err != nil {
return nil , err
2017-08-14 14:36:07 +00:00
}
2017-08-29 00:58:22 +00:00
base . Segments = segments
2017-08-14 14:36:07 +00:00
}
2013-12-25 00:48:07 +00:00
if a . config . Bootstrap {
base . Bootstrap = true
}
2019-06-26 15:43:25 +00:00
if a . config . CheckOutputMaxSize > 0 {
base . CheckOutputMaxSize = a . config . CheckOutputMaxSize
}
2014-06-18 17:32:19 +00:00
if a . config . RejoinAfterLeave {
base . RejoinAfterLeave = true
}
2014-06-20 00:08:48 +00:00
if a . config . BootstrapExpect != 0 {
base . BootstrapExpect = a . config . BootstrapExpect
2014-06-16 21:36:12 +00:00
}
2017-09-25 18:40:42 +00:00
if a . config . RPCProtocol > 0 {
base . ProtocolVersion = uint8 ( a . config . RPCProtocol )
2014-03-09 22:57:03 +00:00
}
2017-02-24 04:32:13 +00:00
if a . config . RaftProtocol != 0 {
base . RaftConfig . ProtocolVersion = raft . ProtocolVersion ( a . config . RaftProtocol )
}
2018-05-10 15:16:38 +00:00
if a . config . RaftSnapshotThreshold != 0 {
base . RaftConfig . SnapshotThreshold = uint64 ( a . config . RaftSnapshotThreshold )
}
2018-05-10 22:06:47 +00:00
if a . config . RaftSnapshotInterval != 0 {
base . RaftConfig . SnapshotInterval = a . config . RaftSnapshotInterval
}
2019-07-23 14:19:57 +00:00
if a . config . RaftTrailingLogs != 0 {
base . RaftConfig . TrailingLogs = uint64 ( a . config . RaftTrailingLogs )
}
2014-08-05 22:36:08 +00:00
if a . config . ACLMasterToken != "" {
base . ACLMasterToken = a . config . ACLMasterToken
}
2014-08-05 22:20:35 +00:00
if a . config . ACLDatacenter != "" {
base . ACLDatacenter = a . config . ACLDatacenter
}
2018-10-19 16:04:07 +00:00
if a . config . ACLTokenTTL != 0 {
base . ACLTokenTTL = a . config . ACLTokenTTL
}
if a . config . ACLPolicyTTL != 0 {
base . ACLPolicyTTL = a . config . ACLPolicyTTL
2014-08-05 22:20:35 +00:00
}
2019-04-15 20:43:19 +00:00
if a . config . ACLRoleTTL != 0 {
base . ACLRoleTTL = a . config . ACLRoleTTL
}
2014-08-05 22:20:35 +00:00
if a . config . ACLDefaultPolicy != "" {
base . ACLDefaultPolicy = a . config . ACLDefaultPolicy
}
if a . config . ACLDownPolicy != "" {
base . ACLDownPolicy = a . config . ACLDownPolicy
}
2018-10-19 16:04:07 +00:00
base . ACLTokenReplication = a . config . ACLTokenReplication
base . ACLsEnabled = a . config . ACLsEnabled
2017-10-02 22:10:21 +00:00
if a . config . ACLEnableKeyListPolicy {
base . ACLEnableKeyListPolicy = a . config . ACLEnableKeyListPolicy
}
2017-09-25 18:40:42 +00:00
if a . config . SessionTTLMin != 0 {
2015-03-27 05:30:04 +00:00
base . SessionTTLMin = a . config . SessionTTLMin
}
2017-03-21 23:36:44 +00:00
if a . config . NonVotingServer {
base . NonVoter = a . config . NonVotingServer
}
2017-12-13 18:31:45 +00:00
// These are fully specified in the agent defaults, so we can simply
// copy them over.
base . AutopilotConfig . CleanupDeadServers = a . config . AutopilotCleanupDeadServers
base . AutopilotConfig . LastContactThreshold = a . config . AutopilotLastContactThreshold
base . AutopilotConfig . MaxTrailingLogs = uint64 ( a . config . AutopilotMaxTrailingLogs )
2019-10-29 14:04:41 +00:00
base . AutopilotConfig . MinQuorum = a . config . AutopilotMinQuorum
2017-12-13 18:31:45 +00:00
base . AutopilotConfig . ServerStabilizationTime = a . config . AutopilotServerStabilizationTime
base . AutopilotConfig . RedundancyZoneTag = a . config . AutopilotRedundancyZoneTag
base . AutopilotConfig . DisableUpgradeMigration = a . config . AutopilotDisableUpgradeMigration
base . AutopilotConfig . UpgradeVersionTag = a . config . AutopilotUpgradeVersionTag
2013-12-20 23:33:13 +00:00
2017-05-03 20:59:06 +00:00
// make sure the advertise address is always set
if base . RPCAdvertise == nil {
base . RPCAdvertise = base . RPCAddr
}
2017-09-01 22:02:50 +00:00
// Rate limiting for RPC calls.
2017-09-25 18:40:42 +00:00
if a . config . RPCRateLimit > 0 {
base . RPCRate = a . config . RPCRateLimit
2017-09-01 22:02:50 +00:00
}
2017-09-25 18:40:42 +00:00
if a . config . RPCMaxBurst > 0 {
base . RPCMaxBurst = a . config . RPCMaxBurst
2017-09-01 22:02:50 +00:00
}
2020-01-31 16:19:37 +00:00
// RPC timeouts/limits.
if a . config . RPCHandshakeTimeout > 0 {
base . RPCHandshakeTimeout = a . config . RPCHandshakeTimeout
2017-10-10 22:19:50 +00:00
}
2020-01-31 16:19:37 +00:00
if a . config . RPCMaxConnsPerClient > 0 {
base . RPCMaxConnsPerClient = a . config . RPCMaxConnsPerClient
}
// RPC-related performance configs. We allow explicit zero value to disable so
// copy it whatever the value.
base . RPCHoldTimeout = a . config . RPCHoldTimeout
2017-10-10 22:19:50 +00:00
if a . config . LeaveDrainTime > 0 {
base . LeaveDrainTime = a . config . LeaveDrainTime
}
2017-05-03 10:57:11 +00:00
// set the src address for outgoing rpc connections
2017-05-10 07:30:19 +00:00
// Use port 0 so that outgoing connections use a random port.
2017-05-15 20:10:36 +00:00
if ! ipaddr . IsAny ( base . RPCAddr . IP ) {
2017-05-10 07:30:19 +00:00
base . RPCSrcAddr = & net . TCPAddr { IP : base . RPCAddr . IP }
}
2017-05-03 10:57:11 +00:00
2014-06-06 22:36:40 +00:00
// Format the build string
revision := a . config . Revision
if len ( revision ) > 8 {
revision = revision [ : 8 ]
}
2017-05-03 19:12:30 +00:00
base . Build = fmt . Sprintf ( "%s%s:%s" , a . config . Version , a . config . VersionPrerelease , revision )
2014-06-06 22:36:40 +00:00
2014-04-04 23:52:39 +00:00
// Copy the TLS configuration
2017-04-28 23:15:55 +00:00
base . VerifyIncoming = a . config . VerifyIncoming || a . config . VerifyIncomingRPC
2017-05-10 21:25:48 +00:00
if a . config . CAPath != "" || a . config . CAFile != "" {
base . UseTLS = true
}
2014-04-04 23:52:39 +00:00
base . VerifyOutgoing = a . config . VerifyOutgoing
2015-05-11 22:16:13 +00:00
base . VerifyServerHostname = a . config . VerifyServerHostname
2014-04-04 23:52:39 +00:00
base . CAFile = a . config . CAFile
2017-04-27 08:29:39 +00:00
base . CAPath = a . config . CAPath
2014-04-04 23:52:39 +00:00
base . CertFile = a . config . CertFile
base . KeyFile = a . config . KeyFile
2014-06-13 18:27:44 +00:00
base . ServerName = a . config . ServerName
2017-09-25 18:40:42 +00:00
base . Domain = a . config . DNSDomain
2017-02-01 20:52:04 +00:00
base . TLSMinVersion = a . config . TLSMinVersion
2017-04-27 08:29:39 +00:00
base . TLSCipherSuites = a . config . TLSCipherSuites
base . TLSPreferServerCipherSuites = a . config . TLSPreferServerCipherSuites
2020-01-17 13:20:57 +00:00
base . DefaultQueryTime = a . config . DefaultQueryTime
base . MaxQueryTime = a . config . MaxQueryTime
2014-04-04 23:52:39 +00:00
2019-06-27 20:22:07 +00:00
base . AutoEncryptAllowTLS = a . config . AutoEncryptAllowTLS
2018-04-25 18:34:08 +00:00
// Copy the Connect CA bootstrap config
if a . config . ConnectEnabled {
base . ConnectEnabled = true
2020-03-09 20:59:02 +00:00
base . ConnectMeshGatewayWANFederationEnabled = a . config . ConnectMeshGatewayWANFederationEnabled
2018-04-25 18:34:08 +00:00
2018-05-10 16:04:33 +00:00
// Allow config to specify cluster_id provided it's a valid UUID. This is
// meant only for tests where a deterministic ID makes fixtures much simpler
// to work with but since it's only read on initial cluster bootstrap it's not
// that much of a liability in production. The worst a user could do is
// configure logically separate clusters with same ID by mistake but we can
// avoid documenting this is even an option.
if clusterID , ok := a . config . ConnectCAConfig [ "cluster_id" ] ; ok {
if cIDStr , ok := clusterID . ( string ) ; ok {
if _ , err := uuid . ParseUUID ( cIDStr ) ; err == nil {
// Valid UUID configured, use that
base . CAConfig . ClusterID = cIDStr
}
}
if base . CAConfig . ClusterID == "" {
2018-05-22 14:11:13 +00:00
// If the tried to specify an ID but typoed it don't ignore as they will
// then bootstrap with a new ID and have to throw away the whole cluster
// and start again.
2020-01-28 23:50:41 +00:00
a . logger . Error ( "connect CA config cluster_id specified but " +
2018-05-22 14:11:13 +00:00
"is not a valid UUID, aborting startup" )
return nil , fmt . Errorf ( "cluster_id was supplied but was not a valid UUID" )
2018-05-10 16:04:33 +00:00
}
}
2018-04-25 18:34:08 +00:00
if a . config . ConnectCAProvider != "" {
base . CAConfig . Provider = a . config . ConnectCAProvider
2019-01-22 17:19:36 +00:00
}
2018-04-25 18:34:08 +00:00
2019-01-22 17:19:36 +00:00
// Merge connect CA Config regardless of provider (since there are some
// common config options valid to all like leaf TTL).
for k , v := range a . config . ConnectCAConfig {
base . CAConfig . Config [ k ] = v
2018-04-25 18:34:08 +00:00
}
}
2020-06-05 19:56:19 +00:00
// copy over auto config settings
base . AutoConfigEnabled = a . config . AutoConfig . Enabled
base . AutoConfigIntroToken = a . config . AutoConfig . IntroToken
base . AutoConfigIntroTokenFile = a . config . AutoConfig . IntroTokenFile
base . AutoConfigServerAddresses = a . config . AutoConfig . ServerAddresses
base . AutoConfigDNSSANs = a . config . AutoConfig . DNSSANs
base . AutoConfigIPSANs = a . config . AutoConfig . IPSANs
base . AutoConfigAuthzEnabled = a . config . AutoConfig . Authorizer . Enabled
base . AutoConfigAuthzAuthMethod = a . config . AutoConfig . Authorizer . AuthMethod
base . AutoConfigAuthzClaimAssertions = a . config . AutoConfig . Authorizer . ClaimAssertions
base . AutoConfigAuthzAllowReuse = a . config . AutoConfig . Authorizer . AllowReuse
2014-08-27 23:49:12 +00:00
// Setup the user event callback
base . UserEventHandler = func ( e serf . UserEvent ) {
select {
case a . eventCh <- e :
case <- a . shutdownCh :
}
}
2013-12-21 00:39:32 +00:00
// Setup the loggers
2019-06-19 12:50:48 +00:00
base . LogLevel = a . config . LogLevel
2017-05-19 15:51:39 +00:00
base . LogOutput = a . LogOutput
2017-06-29 12:35:55 +00:00
2017-09-07 19:17:20 +00:00
// This will set up the LAN keyring, as well as the WAN and any segments
// for servers.
2017-07-17 19:48:45 +00:00
if err := a . setupKeyrings ( base ) ; err != nil {
return nil , fmt . Errorf ( "Failed to configure keyring: %v" , err )
2017-06-29 12:35:55 +00:00
}
2019-04-26 18:25:03 +00:00
base . ConfigEntryBootstrap = a . config . ConfigEntryBootstrap
2020-04-28 13:44:26 +00:00
2020-04-28 13:42:46 +00:00
return a . enterpriseConsulConfig ( base )
2013-12-20 23:33:13 +00:00
}
2017-08-29 00:58:22 +00:00
// Setup the serf and memberlist config for any defined network segments.
2017-09-07 23:37:11 +00:00
func ( a * Agent ) segmentConfig ( ) ( [ ] consul . NetworkSegment , error ) {
var segments [ ] consul . NetworkSegment
2017-08-29 00:58:22 +00:00
config := a . config
2017-09-25 18:40:42 +00:00
for _ , s := range config . Segments {
2017-08-29 00:58:22 +00:00
serfConf := consul . DefaultConfig ( ) . SerfLANConfig
2017-09-25 18:40:42 +00:00
serfConf . MemberlistConfig . BindAddr = s . Bind . IP . String ( )
serfConf . MemberlistConfig . BindPort = s . Bind . Port
serfConf . MemberlistConfig . AdvertiseAddr = s . Advertise . IP . String ( )
serfConf . MemberlistConfig . AdvertisePort = s . Advertise . Port
2017-08-30 19:51:10 +00:00
2017-09-25 18:40:42 +00:00
if config . ReconnectTimeoutLAN != 0 {
serfConf . ReconnectTimeout = config . ReconnectTimeoutLAN
2017-08-29 00:58:22 +00:00
}
2017-09-25 18:40:42 +00:00
if config . EncryptVerifyIncoming {
serfConf . MemberlistConfig . GossipVerifyIncoming = config . EncryptVerifyIncoming
2017-08-29 00:58:22 +00:00
}
2017-09-25 18:40:42 +00:00
if config . EncryptVerifyOutgoing {
serfConf . MemberlistConfig . GossipVerifyOutgoing = config . EncryptVerifyOutgoing
2017-08-29 00:58:22 +00:00
}
var rpcAddr * net . TCPAddr
2017-09-25 18:40:42 +00:00
if s . RPCListener {
2017-08-29 00:58:22 +00:00
rpcAddr = & net . TCPAddr {
2017-09-25 18:40:42 +00:00
IP : s . Bind . IP ,
Port : a . config . ServerPort ,
2017-08-29 00:58:22 +00:00
}
}
2017-09-07 23:37:11 +00:00
segments = append ( segments , consul . NetworkSegment {
2017-09-25 18:40:42 +00:00
Name : s . Name ,
2017-08-30 23:44:04 +00:00
Bind : serfConf . MemberlistConfig . BindAddr ,
Advertise : serfConf . MemberlistConfig . AdvertiseAddr ,
2017-09-25 18:40:42 +00:00
Port : s . Bind . Port ,
2017-08-29 00:58:22 +00:00
RPCAddr : rpcAddr ,
SerfConfig : serfConf ,
} )
}
return segments , nil
}
2017-02-01 18:27:04 +00:00
// makeRandomID will generate a random UUID for a node.
func ( a * Agent ) makeRandomID ( ) ( string , error ) {
id , err := uuid . GenerateUUID ( )
if err != nil {
return "" , err
}
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Using random ID as node ID" , "id" , id )
2017-02-01 18:27:04 +00:00
return id , nil
}
// makeNodeID will try to find a host-specific ID, or else will generate a
// random ID. The returned ID will always be formatted as a GUID. We don't tell
// the caller whether this ID is random or stable since the consequences are
// high for us if this changes, so we will persist it either way. This will let
// gopsutil change implementations without affecting in-place upgrades of nodes.
func ( a * Agent ) makeNodeID ( ) ( string , error ) {
2017-04-13 05:05:38 +00:00
// If they've disabled host-based IDs then just make a random one.
2017-09-25 18:40:42 +00:00
if a . config . DisableHostNodeID {
2017-04-13 05:05:38 +00:00
return a . makeRandomID ( )
}
2017-02-01 18:27:04 +00:00
// Try to get a stable ID associated with the host itself.
info , err := host . Info ( )
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Couldn't get a unique ID from the host" , "error" , err )
2017-02-01 18:27:04 +00:00
return a . makeRandomID ( )
}
// Make sure the host ID parses as a UUID, since we don't have complete
// control over this process.
id := strings . ToLower ( info . HostID )
if _ , err := uuid . ParseUUID ( id ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Unique ID from host isn't formatted as a UUID" ,
"id" , id ,
"error" , err ,
)
2017-02-01 18:27:04 +00:00
return a . makeRandomID ( )
}
2017-04-10 18:57:24 +00:00
// Hash the input to make it well distributed. The reported Host UUID may be
// similar across nodes if they are on a cloud provider or on motherboards
// created from the same batch.
buf := sha512 . Sum512 ( [ ] byte ( id ) )
id = fmt . Sprintf ( "%08x-%04x-%04x-%04x-%12x" ,
buf [ 0 : 4 ] ,
buf [ 4 : 6 ] ,
buf [ 6 : 8 ] ,
buf [ 8 : 10 ] ,
buf [ 10 : 16 ] )
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Using unique ID from host as node ID" , "id" , id )
2017-02-01 18:27:04 +00:00
return id , nil
}
2017-02-01 03:13:49 +00:00
// setupNodeID will pull the persisted node ID, if any, or create a random one
2017-01-18 06:20:11 +00:00
// and persist it.
2017-09-25 18:40:42 +00:00
func ( a * Agent ) setupNodeID ( config * config . RuntimeConfig ) error {
2017-01-18 06:20:11 +00:00
// If they've configured a node ID manually then just use that, as
// long as it's valid.
if config . NodeID != "" {
2017-03-14 02:51:56 +00:00
config . NodeID = types . NodeID ( strings . ToLower ( string ( config . NodeID ) ) )
2017-01-18 06:20:11 +00:00
if _ , err := uuid . ParseUUID ( string ( config . NodeID ) ) ; err != nil {
return err
}
return nil
}
2017-02-01 18:27:04 +00:00
// For dev mode we have no filesystem access so just make one.
2018-06-06 20:04:19 +00:00
if a . config . DataDir == "" {
2017-02-01 18:27:04 +00:00
id , err := a . makeNodeID ( )
2017-01-18 06:20:11 +00:00
if err != nil {
return err
}
config . NodeID = types . NodeID ( id )
return nil
}
// Load saved state, if any. Since a user could edit this, we also
// validate it.
fileID := filepath . Join ( config . DataDir , "node-id" )
if _ , err := os . Stat ( fileID ) ; err == nil {
rawID , err := ioutil . ReadFile ( fileID )
if err != nil {
return err
}
nodeID := strings . TrimSpace ( string ( rawID ) )
2017-03-14 02:51:56 +00:00
nodeID = strings . ToLower ( nodeID )
2017-01-18 06:20:11 +00:00
if _ , err := uuid . ParseUUID ( nodeID ) ; err != nil {
return err
}
config . NodeID = types . NodeID ( nodeID )
}
// If we still don't have a valid node ID, make one.
if config . NodeID == "" {
2017-02-01 18:27:04 +00:00
id , err := a . makeNodeID ( )
2017-01-18 06:20:11 +00:00
if err != nil {
return err
}
if err := lib . EnsurePath ( fileID , false ) ; err != nil {
return err
}
if err := ioutil . WriteFile ( fileID , [ ] byte ( id ) , 0600 ) ; err != nil {
return err
}
config . NodeID = types . NodeID ( id )
}
return nil
}
2017-09-07 19:17:20 +00:00
// setupBaseKeyrings configures the LAN and WAN keyrings.
func ( a * Agent ) setupBaseKeyrings ( config * consul . Config ) error {
2017-07-17 19:48:45 +00:00
// If the keyring file is disabled then just poke the provided key
// into the in-memory keyring.
2018-03-27 19:28:05 +00:00
federationEnabled := config . SerfWANConfig != nil
2017-07-17 19:48:45 +00:00
if a . config . DisableKeyringFile {
if a . config . EncryptKey == "" {
return nil
}
keys := [ ] string { a . config . EncryptKey }
if err := loadKeyring ( config . SerfLANConfig , keys ) ; err != nil {
return err
}
2018-03-27 19:28:05 +00:00
if a . config . ServerMode && federationEnabled {
2017-07-17 19:48:45 +00:00
if err := loadKeyring ( config . SerfWANConfig , keys ) ; err != nil {
return err
}
}
return nil
}
// Otherwise, we need to deal with the keyring files.
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
fileLAN := filepath . Join ( a . config . DataDir , SerfLANKeyring )
fileWAN := filepath . Join ( a . config . DataDir , SerfWANKeyring )
2014-10-10 18:13:30 +00:00
2020-02-13 19:35:09 +00:00
var existingLANKeyring , existingWANKeyring bool
2014-10-10 18:13:30 +00:00
if a . config . EncryptKey == "" {
goto LOAD
}
if _ , err := os . Stat ( fileLAN ) ; err != nil {
if err := initKeyring ( fileLAN , a . config . EncryptKey ) ; err != nil {
return err
}
2020-02-13 19:35:09 +00:00
} else {
existingLANKeyring = true
2014-10-10 18:13:30 +00:00
}
2018-03-27 19:28:05 +00:00
if a . config . ServerMode && federationEnabled {
2014-10-10 18:13:30 +00:00
if _ , err := os . Stat ( fileWAN ) ; err != nil {
if err := initKeyring ( fileWAN , a . config . EncryptKey ) ; err != nil {
return err
}
2020-02-13 19:35:09 +00:00
} else {
existingWANKeyring = true
2014-10-10 18:13:30 +00:00
}
}
LOAD :
if _ , err := os . Stat ( fileLAN ) ; err == nil {
config . SerfLANConfig . KeyringFile = fileLAN
}
if err := loadKeyringFile ( config . SerfLANConfig ) ; err != nil {
return err
}
2018-03-27 19:28:05 +00:00
if a . config . ServerMode && federationEnabled {
2014-10-10 18:13:30 +00:00
if _ , err := os . Stat ( fileWAN ) ; err == nil {
config . SerfWANConfig . KeyringFile = fileWAN
}
if err := loadKeyringFile ( config . SerfWANConfig ) ; err != nil {
return err
}
}
2020-02-13 19:35:09 +00:00
// Only perform the following checks if there was an encrypt_key
// provided in the configuration.
if a . config . EncryptKey != "" {
msg := " keyring doesn't include key provided with -encrypt, using keyring"
if existingLANKeyring &&
keyringIsMissingKey (
config . SerfLANConfig . MemberlistConfig . Keyring ,
a . config . EncryptKey ,
) {
a . logger . Warn ( msg , "keyring" , "LAN" )
}
if existingWANKeyring &&
keyringIsMissingKey (
config . SerfWANConfig . MemberlistConfig . Keyring ,
a . config . EncryptKey ,
) {
a . logger . Warn ( msg , "keyring" , "WAN" )
}
}
2014-10-10 18:13:30 +00:00
return nil
}
2017-09-07 19:17:20 +00:00
// setupKeyrings is used to initialize and load keyrings during agent startup.
func ( a * Agent ) setupKeyrings ( config * consul . Config ) error {
// First set up the LAN and WAN keyrings.
if err := a . setupBaseKeyrings ( config ) ; err != nil {
return err
}
// If there's no LAN keyring then there's nothing else to set up for
// any segments.
lanKeyring := config . SerfLANConfig . MemberlistConfig . Keyring
if lanKeyring == nil {
return nil
}
// Copy the initial state of the LAN keyring into each segment config.
// Segments don't have their own keyring file, they rely on the LAN
// holding the state so things can't get out of sync.
k , pk := lanKeyring . GetKeys ( ) , lanKeyring . GetPrimaryKey ( )
for _ , segment := range config . Segments {
keyring , err := memberlist . NewKeyring ( k , pk )
if err != nil {
return err
}
segment . SerfConfig . MemberlistConfig . Keyring = keyring
}
return nil
}
2017-06-19 14:36:09 +00:00
// registerEndpoint registers a handler for the consul RPC server
2017-06-16 07:54:09 +00:00
// under a unique name while making it accessible under the provided
// name. This allows overwriting handlers for the golang net/rpc
// service which does not allow this.
2017-06-19 14:36:09 +00:00
func ( a * Agent ) registerEndpoint ( name string , handler interface { } ) error {
2017-06-16 07:54:09 +00:00
srv , ok := a . delegate . ( * consul . Server )
if ! ok {
panic ( "agent must be a server" )
}
realname := fmt . Sprintf ( "%s-%d" , name , time . Now ( ) . UnixNano ( ) )
a . endpointsLock . Lock ( )
a . endpoints [ name ] = realname
a . endpointsLock . Unlock ( )
return srv . RegisterEndpoint ( realname , handler )
}
2013-12-20 23:33:13 +00:00
// RPC is used to make an RPC call to the Consul servers
// This allows the agent to implement the Consul.Interface
func ( a * Agent ) RPC ( method string , args interface { } , reply interface { } ) error {
2017-08-10 01:51:55 +00:00
a . endpointsLock . RLock ( )
2017-06-16 07:54:09 +00:00
// fast path: only translate if there are overrides
if len ( a . endpoints ) > 0 {
p := strings . SplitN ( method , "." , 2 )
if e := a . endpoints [ p [ 0 ] ] ; e != "" {
method = e + "." + p [ 1 ]
}
}
2017-08-10 01:51:55 +00:00
a . endpointsLock . RUnlock ( )
2017-05-15 14:05:17 +00:00
return a . delegate . RPC ( method , args , reply )
2013-12-20 23:33:13 +00:00
}
2014-04-18 05:46:31 +00:00
// Leave is used to prepare the agent for a graceful shutdown
2013-12-20 01:14:46 +00:00
func ( a * Agent ) Leave ( ) error {
2017-05-15 14:05:17 +00:00
return a . delegate . Leave ( )
2013-12-20 01:14:46 +00:00
}
2017-06-20 07:29:20 +00:00
// ShutdownAgent is used to hard stop the agent. Should be preceded by
// Leave to do it gracefully. Should be followed by ShutdownEndpoints to
// terminate the HTTP and DNS servers as well.
//
// Returns the error (if any) from shutting down the RPC delegate; all other
// teardown failures are logged but do not abort the shutdown.
func (a *Agent) ShutdownAgent() error {
	a.shutdownLock.Lock()
	defer a.shutdownLock.Unlock()

	// Shutdown is idempotent: a second call is a no-op.
	if a.shutdown {
		return nil
	}
	a.logger.Info("Requesting shutdown")

	// Stop the watches to avoid any notification/state change during shutdown
	a.stopAllWatches()

	// Stop the service manager (must happen before we take the stateLock to avoid deadlock)
	if a.serviceManager != nil {
		a.serviceManager.Stop()
	}

	// Stop all the checks
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	for _, chk := range a.checkMonitors {
		chk.Stop()
	}
	for _, chk := range a.checkTTLs {
		chk.Stop()
	}
	for _, chk := range a.checkHTTPs {
		chk.Stop()
	}
	for _, chk := range a.checkTCPs {
		chk.Stop()
	}
	for _, chk := range a.checkGRPCs {
		chk.Stop()
	}
	for _, chk := range a.checkDockers {
		chk.Stop()
	}
	for _, chk := range a.checkAliases {
		chk.Stop()
	}

	// Stop gRPC
	if a.grpcServer != nil {
		a.grpcServer.Stop()
	}

	// Stop the proxy config manager
	if a.proxyConfig != nil {
		a.proxyConfig.Close()
	}

	// Stop the cache background work
	if a.cache != nil {
		a.cache.Close()
	}

	// Shut down the RPC delegate; the log line records which role was running.
	var err error
	if a.delegate != nil {
		err = a.delegate.Shutdown()
		if _, ok := a.delegate.(*consul.Server); ok {
			a.logger.Info("consul server down")
		} else {
			a.logger.Info("consul client down")
		}
	}

	// Best-effort removal of the pid file; failure is only logged.
	pidErr := a.deletePid()
	if pidErr != nil {
		a.logger.Warn("could not delete pid file", "error", pidErr)
	}

	a.logger.Info("shutdown complete")
	a.shutdown = true
	// Closing shutdownCh signals all background goroutines to exit.
	close(a.shutdownCh)
	return err
}
// ShutdownEndpoints terminates the HTTP and DNS servers. Should be
2018-03-19 16:56:00 +00:00
// preceded by ShutdownAgent.
2017-06-20 07:29:20 +00:00
func ( a * Agent ) ShutdownEndpoints ( ) {
a . shutdownLock . Lock ( )
defer a . shutdownLock . Unlock ( )
2018-03-29 13:45:46 +00:00
if len ( a . dnsServers ) == 0 && len ( a . httpServers ) == 0 {
2017-06-20 07:29:20 +00:00
return
}
for _ , srv := range a . dnsServers {
2019-08-27 15:45:05 +00:00
if srv . Server != nil {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Stopping server" ,
"protocol" , "DNS" ,
"address" , srv . Server . Addr ,
"network" , srv . Server . Net ,
)
2019-08-27 15:45:05 +00:00
srv . Shutdown ( )
}
2017-06-20 07:29:20 +00:00
}
a . dnsServers = nil
for _ , srv := range a . httpServers {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Stopping server" ,
"protocol" , strings . ToUpper ( srv . proto ) ,
"address" , srv . ln . Addr ( ) . String ( ) ,
"network" , srv . ln . Addr ( ) . Network ( ) ,
)
2017-06-20 07:29:20 +00:00
ctx , cancel := context . WithTimeout ( context . Background ( ) , time . Second )
defer cancel ( )
srv . Shutdown ( ctx )
if ctx . Err ( ) == context . DeadlineExceeded {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Timeout stopping server" ,
"protocol" , strings . ToUpper ( srv . proto ) ,
"address" , srv . ln . Addr ( ) . String ( ) ,
"network" , srv . ln . Addr ( ) . Network ( ) ,
)
2017-06-20 07:29:20 +00:00
}
}
a . httpServers = nil
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Waiting for endpoints to shut down" )
2017-06-20 07:29:20 +00:00
a . wgServers . Wait ( )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Endpoints down" )
2013-12-21 00:39:32 +00:00
}
2017-06-02 09:55:29 +00:00
// RetryJoinCh is a channel that transports errors
// from the retry join process.
func (a *Agent) RetryJoinCh() <-chan error {
	return a.retryJoinCh
}
2014-04-18 05:46:31 +00:00
// ShutdownCh is used to return a channel that can be
// selected to wait for the agent to perform a shutdown.
// The channel is closed by ShutdownAgent.
func (a *Agent) ShutdownCh() <-chan struct{} {
	return a.shutdownCh
}
2013-12-30 22:42:41 +00:00
// JoinLAN is used to have the agent join a LAN cluster
func ( a * Agent ) JoinLAN ( addrs [ ] string ) ( n int , err error ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(LAN) joining" , "lan_addresses" , addrs )
2017-05-15 14:05:17 +00:00
n , err = a . delegate . JoinLAN ( addrs )
2019-05-24 14:50:18 +00:00
if err == nil {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(LAN) joined" , "number_of_nodes" , n )
2019-05-24 14:50:18 +00:00
if a . joinLANNotifier != nil {
if notifErr := a . joinLANNotifier . Notify ( systemd . Ready ) ; notifErr != nil {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "systemd notify failed" , "error" , notifErr )
2019-05-24 14:50:18 +00:00
}
2017-06-21 04:43:55 +00:00
}
2019-05-24 14:50:18 +00:00
} else {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "(LAN) couldn't join" ,
"number_of_nodes" , n ,
"error" , err ,
)
2017-06-21 04:43:55 +00:00
}
2013-12-30 22:42:41 +00:00
return
}
// JoinWAN is used to have the agent join a WAN cluster
func ( a * Agent ) JoinWAN ( addrs [ ] string ) ( n int , err error ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(WAN) joining" , "wan_addresses" , addrs )
2017-05-15 14:05:17 +00:00
if srv , ok := a . delegate . ( * consul . Server ) ; ok {
n , err = srv . JoinWAN ( addrs )
2013-12-30 22:42:41 +00:00
} else {
err = fmt . Errorf ( "Must be a server to join WAN cluster" )
}
2019-05-24 14:50:18 +00:00
if err == nil {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(WAN) joined" , "number_of_nodes" , n )
2019-05-24 14:50:18 +00:00
} else {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "(WAN) couldn't join" ,
"number_of_nodes" , n ,
"error" , err ,
)
2019-05-24 14:50:18 +00:00
}
2013-12-30 22:42:41 +00:00
return
}
2020-03-09 20:59:02 +00:00
// PrimaryMeshGatewayAddressesReadyCh returns a channel that will be closed
// when federation state replication ships back at least one primary mesh
// gateway (not via fallback config).
//
// Returns nil when the agent is not running in server mode.
func (a *Agent) PrimaryMeshGatewayAddressesReadyCh() <-chan struct{} {
	if srv, ok := a.delegate.(*consul.Server); ok {
		return srv.PrimaryMeshGatewayAddressesReadyCh()
	}
	return nil
}
// PickRandomMeshGatewaySuitableForDialing is a convenience function used for writing tests.
// Returns the empty string when the agent is not running in server mode.
func (a *Agent) PickRandomMeshGatewaySuitableForDialing(dc string) string {
	if srv, ok := a.delegate.(*consul.Server); ok {
		return srv.PickRandomMeshGatewaySuitableForDialing(dc)
	}
	return ""
}
// RefreshPrimaryGatewayFallbackAddresses is used to update the list of current
// fallback addresses for locating mesh gateways in the primary datacenter.
// Only servers track mesh gateways; clients get an error.
func (a *Agent) RefreshPrimaryGatewayFallbackAddresses(addrs []string) error {
	srv, ok := a.delegate.(*consul.Server)
	if !ok {
		return fmt.Errorf("Must be a server to track mesh gateways in the primary datacenter")
	}
	srv.RefreshPrimaryGatewayFallbackAddresses(addrs)
	return nil
}
2013-12-30 22:42:41 +00:00
// ForceLeave is used to remove a failed node from the cluster
2019-10-04 21:10:02 +00:00
func ( a * Agent ) ForceLeave ( node string , prune bool ) ( err error ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Force leaving node" , "node" , node )
2019-12-02 19:06:15 +00:00
if ok := a . IsMember ( node ) ; ! ok {
return fmt . Errorf ( "agent: No node found with name '%s'" , node )
}
2019-10-04 21:10:02 +00:00
err = a . delegate . RemoveFailedNode ( node , prune )
2013-12-30 22:42:41 +00:00
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Failed to remove node" ,
"node" , node ,
"error" , err ,
)
2013-12-30 22:42:41 +00:00
}
return err
}
2014-05-25 23:59:48 +00:00
// LocalMember is used to return the local node
// as seen by the serf LAN pool.
func (a *Agent) LocalMember() serf.Member {
	return a.delegate.LocalMember()
}
2014-04-18 05:46:31 +00:00
// LANMembers is used to retrieve the LAN members
// known to the underlying client or server.
func (a *Agent) LANMembers() []serf.Member {
	return a.delegate.LANMembers()
}
2014-04-18 05:46:31 +00:00
// WANMembers is used to retrieve the WAN members
2013-12-30 22:42:41 +00:00
func ( a * Agent ) WANMembers ( ) [ ] serf . Member {
2017-05-15 14:05:17 +00:00
if srv , ok := a . delegate . ( * consul . Server ) ; ok {
return srv . WANMembers ( )
2013-12-30 22:42:41 +00:00
}
2017-04-21 01:59:42 +00:00
return nil
2013-12-30 22:42:41 +00:00
}
2014-01-21 19:52:25 +00:00
2019-12-02 19:06:15 +00:00
// IsMember is used to check if a node with the given nodeName
// is a member of the LAN pool.
func (a *Agent) IsMember(nodeName string) bool {
	for _, member := range a.LANMembers() {
		if member.Name == nodeName {
			return true
		}
	}
	return false
}
2014-01-21 19:52:25 +00:00
// StartSync is called once Services and Checks are registered.
// This is called to prevent a race between clients and the anti-entropy routines.
// The syncer runs in its own goroutine for the lifetime of the agent.
func (a *Agent) StartSync() {
	go a.sync.Run()
	a.logger.Info("started state syncer")
}
2014-01-30 21:39:02 +00:00
2018-09-27 14:00:51 +00:00
// PauseSync is used to pause anti-entropy while bulk changes are made. It also
// sets state that agent-local watches use to "ride out" config reloads and bulk
// updates which might spuriously unload state and reload it again.
func (a *Agent) PauseSync() {
	// Do this outside of lock as it has its own locking
	a.sync.Pause()

	// Coordinate local state watchers: lazily create the channel that
	// SyncPausedCh hands out; ResumeSync closes it.
	a.syncMu.Lock()
	defer a.syncMu.Unlock()
	if a.syncCh == nil {
		a.syncCh = make(chan struct{})
	}
}
2014-04-18 05:46:31 +00:00
// ResumeSync is used to unpause anti-entropy after bulk changes are made.
func (a *Agent) ResumeSync() {
	// a.sync maintains a stack/ref count of Pause calls since we call
	// Pause/Resume in nested way during a reload and AddService. We only want to
	// trigger local state watchers if this Resume call actually started sync back
	// up again (i.e. was the last resume on the stack). We could check that
	// separately with a.sync.Paused but that is racey since another Pause call
	// might be made between our Resume and checking Paused.
	resumed := a.sync.Resume()
	if !resumed {
		// Return early so we don't notify local watchers until we are actually
		// resumed.
		return
	}

	// Coordinate local state watchers: closing syncCh wakes everything
	// blocked on SyncPausedCh; nil it out so PauseSync can recreate it.
	a.syncMu.Lock()
	defer a.syncMu.Unlock()
	if a.syncCh != nil {
		close(a.syncCh)
		a.syncCh = nil
	}
}
2020-01-27 19:54:32 +00:00
// SyncPausedCh returns either a channel or nil. If nil sync is not paused. If
// non-nil, the channel will be closed when sync resumes.
func (a *Agent) SyncPausedCh() <-chan struct{} {
	a.syncMu.Lock()
	defer a.syncMu.Unlock()
	return a.syncCh
}
2017-08-14 14:36:07 +00:00
// GetLANCoordinate returns the coordinates of this node in the local pools
// (assumes coordinates are enabled, so check that before calling).
func (a *Agent) GetLANCoordinate() (lib.CoordinateSet, error) {
	return a.delegate.GetLANCoordinate()
}
2015-06-06 03:31:33 +00:00
// sendCoordinate is a long-running loop that periodically sends our coordinate
// to the server. Closing the agent's shutdownChannel will cause this to exit.
func (a *Agent) sendCoordinate() {
OUTER:
	for {
		// Scale the send interval with cluster size and add random jitter so
		// the servers are not hit by synchronized updates from every agent.
		rate := a.config.SyncCoordinateRateTarget
		min := a.config.SyncCoordinateIntervalMin
		intv := lib.RateScaledInterval(rate, min, len(a.LANMembers()))
		intv = intv + lib.RandomStagger(intv)

		select {
		case <-time.After(intv):
			// Skip updates until every server speaks protocol version 3+.
			members := a.LANMembers()
			grok, err := consul.CanServersUnderstandProtocol(members, 3)
			if err != nil {
				a.logger.Error("Failed to check servers", "error", err)
				continue
			}
			if !grok {
				a.logger.Debug("Skipping coordinate updates until servers are upgraded")
				continue
			}

			cs, err := a.GetLANCoordinate()
			if err != nil {
				a.logger.Error("Failed to get coordinate", "error", err)
				continue
			}

			// Send one update RPC per network segment.
			for segment, coord := range cs {
				agentToken := a.tokens.AgentToken()
				req := structs.CoordinateUpdateRequest{
					Datacenter:   a.config.Datacenter,
					Node:         a.config.NodeName,
					Segment:      segment,
					Coord:        coord,
					WriteRequest: structs.WriteRequest{Token: agentToken},
				}
				var reply struct{}
				// todo(kit) port all of these logger calls to hclog w/ loglevel configuration
				// todo(kit) handle acl.ErrNotFound cases here in the future
				if err := a.RPC("Coordinate.Update", &req, &reply); err != nil {
					if acl.IsErrPermissionDenied(err) {
						accessorID := a.aclAccessorID(agentToken)
						a.logger.Warn("Coordinate update blocked by ACLs", "accessorID", accessorID)
					} else {
						a.logger.Error("Coordinate update error", "error", err)
					}
					// On any RPC failure, wait a full interval before
					// retrying (skip the remaining segments this round).
					continue OUTER
				}
			}
		case <-a.shutdownCh:
			return
		}
	}
}
2016-08-16 19:52:30 +00:00
// reapServicesInternal does a single pass, looking for services to reap:
// any service with a check that has been critical longer than the check's
// configured reap timeout is deregistered.
func (a *Agent) reapServicesInternal() {
	reaped := make(map[structs.ServiceID]bool)
	for checkID, cs := range a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()) {
		serviceID := cs.Check.CompoundServiceID()

		// There's nothing to do if there's no service.
		if serviceID.ID == "" {
			continue
		}

		// There might be multiple checks for one service, so
		// we don't need to reap multiple times.
		if reaped[serviceID] {
			continue
		}

		// See if there's a timeout.
		// todo(fs): this looks fishy... why is there another data structure in the agent with its own lock?
		a.stateLock.Lock()
		timeout := a.checkReapAfter[checkID]
		a.stateLock.Unlock()

		// Reap, if necessary. We keep track of which service
		// this is so that we won't try to remove it again.
		if timeout > 0 && cs.CriticalFor() > timeout {
			reaped[serviceID] = true
			if err := a.RemoveService(serviceID); err != nil {
				a.logger.Error("unable to deregister service after check has been critical for too long",
					"service", serviceID.String(),
					"check", checkID.String(),
					"error", err)
			} else {
				a.logger.Info("Check for service has been critical for too long; deregistered service",
					"service", serviceID.String(),
					"check", checkID.String(),
				)
			}
		}
	}
}
2016-08-16 07:05:55 +00:00
2016-08-16 19:52:30 +00:00
// reapServices is a long running goroutine that looks for checks that have been
// critical too long and deregisters their associated services. It exits when
// the agent's shutdown channel is closed.
func (a *Agent) reapServices() {
	for {
		select {
		case <-time.After(a.config.CheckReapInterval):
			a.reapServicesInternal()
		case <-a.shutdownCh:
			return
		}
	}
}
2017-06-15 16:46:06 +00:00
// persistedService is used to wrap a service definition and bundle it
// with an ACL token so we can restore both at a later agent start.
type persistedService struct {
	// Token is the ACL token the service was registered with.
	Token   string
	Service *structs.NodeService
	// Source records the configSource (as a string) the registration came from.
	Source string
}
2014-11-24 08:36:03 +00:00
// persistService saves a service definition to a JSON file in the data dir
2019-09-24 15:04:48 +00:00
func ( a * Agent ) persistService ( service * structs . NodeService , source configSource ) error {
2019-12-10 02:26:41 +00:00
svcID := service . CompoundServiceID ( )
svcPath := filepath . Join ( a . config . DataDir , servicesDir , svcID . StringHash ( ) )
2016-11-07 18:51:03 +00:00
2015-05-06 05:08:03 +00:00
wrapped := persistedService {
2019-12-10 02:26:41 +00:00
Token : a . State . ServiceToken ( service . CompoundServiceID ( ) ) ,
2015-05-06 05:08:03 +00:00
Service : service ,
2019-09-24 15:04:48 +00:00
Source : source . String ( ) ,
2015-05-06 05:08:03 +00:00
}
encoded , err := json . Marshal ( wrapped )
if err != nil {
2016-04-26 22:03:26 +00:00
return err
2015-05-06 05:08:03 +00:00
}
2016-11-07 18:51:03 +00:00
2018-05-03 20:56:42 +00:00
return file . WriteAtomic ( svcPath , encoded )
2014-11-24 08:36:03 +00:00
}
// purgeService removes a persisted service definition file from the data dir
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeService ( serviceID structs . ServiceID ) error {
svcPath := filepath . Join ( a . config . DataDir , servicesDir , serviceID . StringHash ( ) )
2014-11-24 08:36:03 +00:00
if _ , err := os . Stat ( svcPath ) ; err == nil {
return os . Remove ( svcPath )
}
return nil
}
// persistCheck saves a check definition to the local agent's state directory
2019-09-24 15:04:48 +00:00
func ( a * Agent ) persistCheck ( check * structs . HealthCheck , chkType * structs . CheckType , source configSource ) error {
2019-12-10 02:26:41 +00:00
cid := check . CompoundCheckID ( )
checkPath := filepath . Join ( a . config . DataDir , checksDir , cid . StringHash ( ) )
2014-11-29 20:25:01 +00:00
// Create the persisted check
2015-04-28 19:44:46 +00:00
wrapped := persistedCheck {
Check : check ,
ChkType : chkType ,
2019-12-10 02:26:41 +00:00
Token : a . State . CheckToken ( check . CompoundCheckID ( ) ) ,
2019-09-24 15:04:48 +00:00
Source : source . String ( ) ,
2015-04-28 19:44:46 +00:00
}
2014-11-29 20:25:01 +00:00
2015-04-28 19:44:46 +00:00
encoded , err := json . Marshal ( wrapped )
2014-11-29 20:25:01 +00:00
if err != nil {
2016-04-26 22:03:26 +00:00
return err
2014-11-29 20:25:01 +00:00
}
2016-11-07 18:51:03 +00:00
2018-05-03 20:56:42 +00:00
return file . WriteAtomic ( checkPath , encoded )
2014-11-24 08:36:03 +00:00
}
// purgeCheck removes a persisted check definition file from the data dir
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeCheck ( checkID structs . CheckID ) error {
checkPath := filepath . Join ( a . config . DataDir , checksDir , checkID . StringHash ( ) )
2014-11-24 08:36:03 +00:00
if _ , err := os . Stat ( checkPath ) ; err == nil {
return os . Remove ( checkPath )
}
return nil
}
2019-09-24 15:04:48 +00:00
// persistedServiceConfig is used to serialize the resolved service config that
// feeds into the ServiceManager at registration time so that it may be
// restored later on.
type persistedServiceConfig struct {
	// ServiceID is the plain (non-compound) service identifier; the
	// enterprise portion is carried by the embedded EnterpriseMeta.
	ServiceID string
	Defaults  *structs.ServiceConfigResponse
	structs.EnterpriseMeta
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) persistServiceConfig ( serviceID structs . ServiceID , defaults * structs . ServiceConfigResponse ) error {
2019-09-24 15:04:48 +00:00
// Create the persisted config.
wrapped := persistedServiceConfig {
2019-12-10 02:26:41 +00:00
ServiceID : serviceID . ID ,
Defaults : defaults ,
EnterpriseMeta : serviceID . EnterpriseMeta ,
2019-09-24 15:04:48 +00:00
}
encoded , err := json . Marshal ( wrapped )
if err != nil {
return err
}
dir := filepath . Join ( a . config . DataDir , serviceConfigDir )
2019-12-10 02:26:41 +00:00
configPath := filepath . Join ( dir , serviceID . StringHash ( ) )
2019-09-24 15:04:48 +00:00
// Create the config dir if it doesn't exist
if err := os . MkdirAll ( dir , 0700 ) ; err != nil {
return fmt . Errorf ( "failed creating service configs dir %q: %s" , dir , err )
}
return file . WriteAtomic ( configPath , encoded )
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeServiceConfig ( serviceID structs . ServiceID ) error {
configPath := filepath . Join ( a . config . DataDir , serviceConfigDir , serviceID . StringHash ( ) )
2019-09-24 15:04:48 +00:00
if _ , err := os . Stat ( configPath ) ; err == nil {
return os . Remove ( configPath )
}
return nil
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) readPersistedServiceConfigs ( ) ( map [ structs . ServiceID ] * structs . ServiceConfigResponse , error ) {
out := make ( map [ structs . ServiceID ] * structs . ServiceConfigResponse )
2019-09-24 15:04:48 +00:00
configDir := filepath . Join ( a . config . DataDir , serviceConfigDir )
files , err := ioutil . ReadDir ( configDir )
if err != nil {
if os . IsNotExist ( err ) {
return nil , nil
}
return nil , fmt . Errorf ( "Failed reading service configs dir %q: %s" , configDir , err )
}
for _ , fi := range files {
// Skip all dirs
if fi . IsDir ( ) {
continue
}
// Skip all partially written temporary files
if strings . HasSuffix ( fi . Name ( ) , "tmp" ) {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Ignoring temporary service config file" , "file" , fi . Name ( ) )
2019-09-24 15:04:48 +00:00
continue
}
// Read the contents into a buffer
file := filepath . Join ( configDir , fi . Name ( ) )
buf , err := ioutil . ReadFile ( file )
if err != nil {
return nil , fmt . Errorf ( "failed reading service config file %q: %s" , file , err )
}
// Try decoding the service config definition
var p persistedServiceConfig
if err := json . Unmarshal ( buf , & p ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "Failed decoding service config file" ,
"file" , file ,
"error" , err ,
)
2019-09-24 15:04:48 +00:00
continue
}
2019-12-10 02:26:41 +00:00
out [ structs . NewServiceID ( p . ServiceID , & p . EnterpriseMeta ) ] = p . Defaults
2019-09-24 15:04:48 +00:00
}
return out , nil
}
2019-09-02 15:38:29 +00:00
// AddServiceAndReplaceChecks is used to add a service entry and its check. Any check for this service missing from chkTypes will be deleted.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered.
func (a *Agent) AddServiceAndReplaceChecks(service *structs.NodeService, chkTypes []*structs.CheckType, persist bool, token string, source configSource) error {
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	// Identical to AddService except replaceExistingChecks is true, which
	// removes any existing checks for this service not present in chkTypes.
	return a.addServiceLocked(&addServiceRequest{
		service:               service,
		chkTypes:              chkTypes,
		previousDefaults:      nil,
		waitForCentralConfig:  true,
		persist:               persist,
		persistServiceConfig:  true,
		token:                 token,
		replaceExistingChecks: true,
		source:                source,
	}, a.snapshotCheckState())
}
2014-01-30 21:39:02 +00:00
// AddService is used to add a service entry.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered. Existing checks for the service are kept
// (replaceExistingChecks is false); see AddServiceAndReplaceChecks.
func (a *Agent) AddService(service *structs.NodeService, chkTypes []*structs.CheckType, persist bool, token string, source configSource) error {
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	return a.addServiceLocked(&addServiceRequest{
		service:               service,
		chkTypes:              chkTypes,
		previousDefaults:      nil,
		waitForCentralConfig:  true,
		persist:               persist,
		persistServiceConfig:  true,
		token:                 token,
		replaceExistingChecks: false,
		source:                source,
	}, a.snapshotCheckState())
}
2019-04-24 13:11:08 +00:00
// addServiceLocked adds a service entry to the service manager if enabled, or directly
// to the local state if it is not. This function assumes the state lock is already held.
//
// snap is a snapshot of prior check states used to restore status/output of
// checks that are being re-registered.
func (a *Agent) addServiceLocked(req *addServiceRequest, snap map[structs.CheckID]*structs.HealthCheck) error {
	// Clear the addServiceInternal-only fields before validation.
	req.fixupForAddServiceLocked()

	req.service.EnterpriseMeta.Normalize()

	if err := a.validateService(req.service, req.chkTypes); err != nil {
		return err
	}

	// With central config enabled the ServiceManager owns the rest of the flow.
	if a.config.EnableCentralServiceConfig {
		return a.serviceManager.AddService(req)
	}

	// previousDefaults are ignored here because they are only relevant for central config.
	req.persistService = nil
	req.persistDefaults = nil
	req.persistServiceConfig = false

	return a.addServiceInternal(req, snap)
}
// addServiceRequest is the union of arguments for calling both
// addServiceLocked and addServiceInternal. The overlap was significant enough
// to warrant merging them and indicating which fields are meant to be set only
// in one of the two contexts.
//
// Before using the request struct one of the fixupFor*() methods should be
// invoked to clear irrelevant fields.
//
// The ServiceManager.AddService signature is largely just a passthrough for
// addServiceLocked and should be treated as such.
type addServiceRequest struct {
	service               *structs.NodeService
	chkTypes              []*structs.CheckType
	previousDefaults      *structs.ServiceConfigResponse // just for: addServiceLocked
	waitForCentralConfig  bool                           // just for: addServiceLocked
	persistService        *structs.NodeService           // just for: addServiceInternal
	persistDefaults       *structs.ServiceConfigResponse // just for: addServiceInternal
	persist               bool
	persistServiceConfig  bool
	token                 string
	replaceExistingChecks bool
	source                configSource
}

// fixupForAddServiceLocked clears the fields only addServiceInternal may set.
func (r *addServiceRequest) fixupForAddServiceLocked() {
	r.persistService = nil
	r.persistDefaults = nil
}

// fixupForAddServiceInternal clears the fields only addServiceLocked may set.
func (r *addServiceRequest) fixupForAddServiceInternal() {
	r.previousDefaults = nil
	r.waitForCentralConfig = false
}
2015-02-09 17:30:06 +00:00
2019-04-24 13:11:08 +00:00
// addServiceInternal adds the given service and checks to the local state.
2020-03-09 11:59:41 +00:00
func ( a * Agent ) addServiceInternal ( req * addServiceRequest , snap map [ structs . CheckID ] * structs . HealthCheck ) error {
2019-09-24 15:04:48 +00:00
req . fixupForAddServiceInternal ( )
var (
service = req . service
chkTypes = req . chkTypes
persistService = req . persistService
persistDefaults = req . persistDefaults
persist = req . persist
persistServiceConfig = req . persistServiceConfig
token = req . token
replaceExistingChecks = req . replaceExistingChecks
source = req . source
)
2015-05-06 19:28:42 +00:00
// Pause the service syncs during modification
a . PauseSync ( )
defer a . ResumeSync ( )
2020-01-17 14:54:17 +00:00
// Set default tagged addresses
serviceIP := net . ParseIP ( service . Address )
serviceAddressIs4 := serviceIP != nil && serviceIP . To4 ( ) != nil
serviceAddressIs6 := serviceIP != nil && serviceIP . To4 ( ) == nil
if service . TaggedAddresses == nil {
service . TaggedAddresses = map [ string ] structs . ServiceAddress { }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv4 ] ; ! ok && serviceAddressIs4 {
service . TaggedAddresses [ structs . TaggedAddressLANIPv4 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressWANIPv4 ] ; ! ok && serviceAddressIs4 {
service . TaggedAddresses [ structs . TaggedAddressWANIPv4 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] ; ! ok && serviceAddressIs6 {
service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressWANIPv6 ] ; ! ok && serviceAddressIs6 {
service . TaggedAddresses [ structs . TaggedAddressWANIPv6 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
2019-03-04 14:34:05 +00:00
var checks [ ] * structs . HealthCheck
2014-11-24 08:36:03 +00:00
2019-12-10 02:26:41 +00:00
// all the checks must be associated with the same enterprise meta of the service
// so this map can just use the main CheckID for indexing
existingChecks := map [ structs . CheckID ] bool { }
for _ , check := range a . State . ChecksForService ( service . CompoundServiceID ( ) , false ) {
existingChecks [ check . CompoundCheckID ( ) ] = false
2019-09-02 15:38:29 +00:00
}
2014-01-30 21:39:02 +00:00
// Create an associated health check
2015-01-14 01:52:17 +00:00
for i , chkType := range chkTypes {
2017-05-15 19:49:13 +00:00
checkID := string ( chkType . CheckID )
if checkID == "" {
checkID = fmt . Sprintf ( "service:%s" , service . ID )
if len ( chkTypes ) > 1 {
checkID += fmt . Sprintf ( ":%d" , i + 1 )
}
}
2019-11-14 15:59:06 +00:00
2020-04-15 16:03:29 +00:00
cid := structs . NewCheckID ( types . CheckID ( checkID ) , & service . EnterpriseMeta )
2019-12-10 02:26:41 +00:00
existingChecks [ cid ] = true
2019-11-14 15:59:06 +00:00
2017-05-15 19:49:13 +00:00
name := chkType . Name
if name == "" {
name = fmt . Sprintf ( "Service '%s' check" , service . Service )
2015-01-14 01:52:17 +00:00
}
2014-01-30 21:39:02 +00:00
check := & structs . HealthCheck {
2019-12-10 02:26:41 +00:00
Node : a . config . NodeName ,
CheckID : types . CheckID ( checkID ) ,
Name : name ,
Status : api . HealthCritical ,
Notes : chkType . Notes ,
ServiceID : service . ID ,
ServiceName : service . Service ,
ServiceTags : service . Tags ,
Type : chkType . Type ( ) ,
EnterpriseMeta : service . EnterpriseMeta ,
2014-01-30 21:39:02 +00:00
}
2015-04-12 00:53:48 +00:00
if chkType . Status != "" {
check . Status = chkType . Status
}
2019-03-04 14:34:05 +00:00
2019-07-17 19:06:50 +00:00
// Restore the fields from the snapshot.
2019-12-10 02:26:41 +00:00
prev , ok := snap [ cid ]
2019-07-17 19:06:50 +00:00
if ok {
check . Output = prev . Output
check . Status = prev . Status
}
2019-03-04 14:34:05 +00:00
checks = append ( checks , check )
}
// cleanup, store the ids of services and checks that weren't previously
2019-09-26 02:55:52 +00:00
// registered so we clean them up if something fails halfway through the
2019-03-04 14:34:05 +00:00
// process.
2019-12-10 02:26:41 +00:00
var cleanupServices [ ] structs . ServiceID
var cleanupChecks [ ] structs . CheckID
2019-03-04 14:34:05 +00:00
2019-12-10 02:26:41 +00:00
sid := service . CompoundServiceID ( )
if s := a . State . Service ( sid ) ; s == nil {
cleanupServices = append ( cleanupServices , sid )
2019-03-04 14:34:05 +00:00
}
for _ , check := range checks {
2019-12-10 02:26:41 +00:00
cid := check . CompoundCheckID ( )
if c := a . State . Check ( cid ) ; c == nil {
cleanupChecks = append ( cleanupChecks , cid )
2019-03-04 14:34:05 +00:00
}
}
err := a . State . AddServiceWithChecks ( service , checks , token )
if err != nil {
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
for i := range checks {
if err := a . addCheck ( checks [ i ] , chkTypes [ i ] , service , persist , token , source ) ; err != nil {
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
if persist && a . config . DataDir != "" {
2019-09-24 15:04:48 +00:00
if err := a . persistCheck ( checks [ i ] , chkTypes [ i ] , source ) ; err != nil {
2019-03-04 14:34:05 +00:00
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
}
}
2019-09-26 02:55:52 +00:00
// If a proxy service wishes to expose checks, check targets need to be rerouted to the proxy listener
// This needs to be called after chkTypes are added to the agent, to avoid being overwritten
2020-04-15 16:03:29 +00:00
psid := structs . NewServiceID ( service . Proxy . DestinationServiceID , & service . EnterpriseMeta )
2019-12-10 02:26:41 +00:00
2019-09-26 02:55:52 +00:00
if service . Proxy . Expose . Checks {
agent: rewrite checks with proxy address, not local service address (#7518)
Exposing checks is supposed to allow a Consul agent bound to a different
IP address (e.g., in a different Kubernetes pod) to access healthchecks
through the proxy while the underlying service binds to localhost. This
is an important security feature that makes sure no external traffic
reaches the service except through the proxy.
However, as far as I can tell, this is subtly broken in the case where
the Consul agent cannot reach the proxy over localhost.
If a proxy is configured with: `{ LocalServiceAddress: "127.0.0.1",
Checks: true }`, as is typical with a sidecar proxy, the Consul checks
are currently rewritten to `127.0.0.1:<random port>`. A Consul agent
that does not share the loopback address cannot reach this address. Just
to make sure I was not misunderstanding, I tried configuring the proxy
with `{ LocalServiceAddress: "<pod ip>", Checks: true }`. In this case,
while the checks are rewritten as expected and the agent can reach the
dynamic port, the proxy can no longer reach its backend because the
traffic is no longer on the loopback interface.
I think rewriting the checks to use `proxy.Address`, the proxy's own
address, is more correct in this case. That is the IP where the proxy
can be reached, both by other proxies and by a Consul agent running on
a different IP. The local service address should continue to use
`127.0.0.1` in most cases.
2020-04-02 07:35:43 +00:00
err := a . rerouteExposedChecks ( psid , service . Address )
2019-09-26 02:55:52 +00:00
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "failed to reroute L7 checks to exposed proxy listener" )
2019-09-26 02:55:52 +00:00
}
} else {
// Reset check targets if proxy was re-registered but no longer wants to expose checks
// If the proxy is being registered for the first time then this is a no-op
2019-12-10 02:26:41 +00:00
a . resetExposedChecks ( psid )
2019-09-26 02:55:52 +00:00
}
2019-09-24 15:04:48 +00:00
if persistServiceConfig && a . config . DataDir != "" {
var err error
if persistDefaults != nil {
2019-12-10 02:26:41 +00:00
err = a . persistServiceConfig ( service . CompoundServiceID ( ) , persistDefaults )
2019-09-24 15:04:48 +00:00
} else {
2019-12-10 02:26:41 +00:00
err = a . purgeServiceConfig ( service . CompoundServiceID ( ) )
2019-09-24 15:04:48 +00:00
}
if err != nil {
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
}
2019-03-04 14:34:05 +00:00
// Persist the service to a file
if persist && a . config . DataDir != "" {
2019-09-24 15:04:48 +00:00
if persistService == nil {
persistService = service
}
if err := a . persistService ( persistService , source ) ; err != nil {
2019-03-04 14:34:05 +00:00
a . cleanupRegistration ( cleanupServices , cleanupChecks )
2014-01-30 21:39:02 +00:00
return err
}
}
2018-09-27 13:33:12 +00:00
2019-09-02 15:38:29 +00:00
if replaceExistingChecks {
for checkID , keep := range existingChecks {
if ! keep {
a . removeCheckLocked ( checkID , persist )
}
}
}
2014-01-30 21:39:02 +00:00
return nil
}
2019-04-23 06:39:02 +00:00
// validateService validates an service and its checks, either returning an error or emitting a
// warning based on the nature of the error.
func ( a * Agent ) validateService ( service * structs . NodeService , chkTypes [ ] * structs . CheckType ) error {
if service . Service == "" {
return fmt . Errorf ( "Service name missing" )
}
if service . ID == "" && service . Service != "" {
service . ID = service . Service
}
for _ , check := range chkTypes {
if err := check . Validate ( ) ; err != nil {
return fmt . Errorf ( "Check is not valid: %v" , err )
}
}
// Set default weights if not specified. This is important as it ensures AE
// doesn't consider the service different since it has nil weights.
if service . Weights == nil {
service . Weights = & structs . Weights { Passing : 1 , Warning : 1 }
}
// Warn if the service name is incompatible with DNS
if InvalidDnsRe . MatchString ( service . Service ) {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Service name will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to invalid characters. Valid characters include " +
2020-01-28 23:50:41 +00:00
"all alpha-numerics and dashes." ,
"service" , service . Service ,
)
2019-04-23 06:39:02 +00:00
} else if len ( service . Service ) > MaxDNSLabelLength {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Service name will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to it being too long. Valid lengths are between " +
2020-01-28 23:50:41 +00:00
"1 and 63 bytes." ,
"service" , service . Service ,
)
2019-04-23 06:39:02 +00:00
}
// Warn if any tags are incompatible with DNS
for _ , tag := range service . Tags {
if InvalidDnsRe . MatchString ( tag ) {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Service tag will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to invalid characters. Valid characters include " +
2020-01-28 23:50:41 +00:00
"all alpha-numerics and dashes." ,
"tag" , tag ,
)
2019-04-23 06:39:02 +00:00
} else if len ( tag ) > MaxDNSLabelLength {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Service tag will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to it being too long. Valid lengths are between " +
2020-01-28 23:50:41 +00:00
"1 and 63 bytes." ,
"tag" , tag ,
)
2019-04-23 06:39:02 +00:00
}
}
2020-01-17 14:54:17 +00:00
// Check IPv4/IPv6 tagged addresses
if service . TaggedAddresses != nil {
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv4 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) == nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv4 address" , structs . TaggedAddressLANIPv4 )
}
}
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressWANIPv4 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) == nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv4 address" , structs . TaggedAddressWANIPv4 )
}
}
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) != nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv6 address" , structs . TaggedAddressLANIPv6 )
}
}
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) != nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv6 address" , structs . TaggedAddressLANIPv6 )
}
}
}
2019-04-23 06:39:02 +00:00
return nil
}
2019-03-04 14:34:05 +00:00
// cleanupRegistration is called on registration error to ensure no there are no
// leftovers after a partial failure
2019-12-10 02:26:41 +00:00
func ( a * Agent ) cleanupRegistration ( serviceIDs [ ] structs . ServiceID , checksIDs [ ] structs . CheckID ) {
2019-03-04 14:34:05 +00:00
for _ , s := range serviceIDs {
if err := a . State . RemoveService ( s ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to remove service during cleanup" ,
"service" , s . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
if err := a . purgeService ( s ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to purge service file during cleanup" ,
"service" , s . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
2019-09-24 15:04:48 +00:00
if err := a . purgeServiceConfig ( s ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to purge service config file during cleanup" ,
"service" , s ,
"error" , err ,
)
2019-09-24 15:04:48 +00:00
}
2020-01-20 13:01:40 +00:00
if err := a . removeServiceSidecars ( s , true ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "service registration: cleanup: failed remove sidecars for" , "service" , s , "error" , err )
2020-01-20 13:01:40 +00:00
}
2019-03-04 14:34:05 +00:00
}
for _ , c := range checksIDs {
a . cancelCheckMonitors ( c )
if err := a . State . RemoveCheck ( c ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to remove check during cleanup" ,
"check" , c . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
if err := a . purgeCheck ( c ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to purge check file during cleanup" ,
"check" , c . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
}
}
2014-01-30 21:39:02 +00:00
// RemoveService is used to remove a service entry.
// The agent will make a best effort to ensure it is deregistered
2019-12-10 02:26:41 +00:00
func ( a * Agent ) RemoveService ( serviceID structs . ServiceID ) error {
2019-09-24 15:04:48 +00:00
return a . removeService ( serviceID , true )
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) removeService ( serviceID structs . ServiceID , persist bool ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
return a . removeServiceLocked ( serviceID , persist )
}
// removeServiceLocked is used to remove a service entry.
// The agent will make a best effort to ensure it is deregistered.
// The caller must hold a.stateLock.
func (a *Agent) removeServiceLocked(serviceID structs.ServiceID, persist bool) error {
	// Validate ServiceID
	if serviceID.ID == "" {
		return fmt.Errorf("ServiceID missing")
	}

	// Shut down the config watch in the service manager if enabled.
	if a.config.EnableCentralServiceConfig {
		a.serviceManager.RemoveService(serviceID)
	}

	// Reset the HTTP check targets if they were exposed through a proxy
	// If this is not a proxy or checks were not exposed then this is a no-op
	svc := a.State.Service(serviceID)
	if svc != nil {
		psid := structs.NewServiceID(svc.Proxy.DestinationServiceID, &svc.EnterpriseMeta)
		a.resetExposedChecks(psid)
	}

	// Collect the IDs of all checks attached to this service so they can be
	// removed from state together with the service.
	checks := a.State.ChecksForService(serviceID, false)
	var checkIDs []structs.CheckID
	for id := range checks {
		checkIDs = append(checkIDs, id)
	}

	// Remove service immediately. A failure here is logged and deliberately
	// swallowed (returns nil): this is a best-effort deregistration.
	if err := a.State.RemoveServiceWithChecks(serviceID, checkIDs); err != nil {
		a.logger.Warn("Failed to deregister service",
			"service", serviceID.String(),
			"error", err,
		)
		return nil
	}

	// Remove the service from the data dir
	if persist {
		if err := a.purgeService(serviceID); err != nil {
			return err
		}
		if err := a.purgeServiceConfig(serviceID); err != nil {
			return err
		}
	}

	// Deregister any associated health checks (stops their monitors and,
	// when persist is set, purges their files too).
	for checkID := range checks {
		if err := a.removeCheckLocked(checkID, persist); err != nil {
			return err
		}
	}

	a.logger.Debug("removed service", "service", serviceID.String())

	// If any Sidecar services exist for the removed service ID, remove them too.
	return a.removeServiceSidecars(serviceID, persist)
}
func ( a * Agent ) removeServiceSidecars ( serviceID structs . ServiceID , persist bool ) error {
2020-04-15 16:03:29 +00:00
sidecarSID := structs . NewServiceID ( a . sidecarServiceID ( serviceID . ID ) , & serviceID . EnterpriseMeta )
2019-12-10 02:26:41 +00:00
if sidecar := a . State . Service ( sidecarSID ) ; sidecar != nil {
2018-09-27 13:33:12 +00:00
// Double check that it's not just an ID collision and we actually added
// this from a sidecar.
if sidecar . LocallyRegisteredAsSidecar {
// Remove it!
2019-12-10 02:26:41 +00:00
err := a . removeServiceLocked ( sidecarSID , persist )
2018-09-27 13:33:12 +00:00
if err != nil {
return err
}
}
}
2015-01-08 06:26:40 +00:00
return nil
2014-01-30 21:39:02 +00:00
}
// AddCheck is used to add a health check to the agent.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered. The Check may include a CheckType which
// is used to automatically update the check status
2018-10-11 12:22:11 +00:00
func ( a * Agent ) AddCheck ( check * structs . HealthCheck , chkType * structs . CheckType , persist bool , token string , source configSource ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
return a . addCheckLocked ( check , chkType , persist , token , source )
}
// addCheckLocked adds a health check to the agent, validating it, starting its
// monitor, recording it in local state for anti-entropy, and optionally
// persisting it to disk. The caller must hold a.stateLock.
func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.CheckType, persist bool, token string, source configSource) error {
	var service *structs.NodeService

	check.EnterpriseMeta.Normalize()

	// A service-level check must reference a service that is already
	// registered in local state.
	if check.ServiceID != "" {
		cid := check.CompoundServiceID()
		service = a.State.Service(cid)
		if service == nil {
			return fmt.Errorf("ServiceID %q does not exist", cid.String())
		}
	}

	// Extra validations
	if err := check.Validate(); err != nil {
		return err
	}

	// snapshot the current state of the health check to avoid potential flapping:
	// if the check already existed, restore its previous status/output after
	// (re-)registration instead of resetting it.
	cid := check.CompoundCheckID()
	existing := a.State.Check(cid)
	defer func() {
		if existing != nil {
			a.State.UpdateCheck(cid, existing.Status, existing.Output)
		}
	}()

	err := a.addCheck(check, chkType, service, persist, token, source)
	if err != nil {
		// Roll back the state entry so a failed registration leaves nothing behind.
		a.State.RemoveCheck(cid)
		return err
	}

	// Add to the local state for anti-entropy
	err = a.State.AddCheck(check, token)
	if err != nil {
		return err
	}

	// Persist the check
	if persist && a.config.DataDir != "" {
		return a.persistCheck(check, chkType, source)
	}

	return nil
}
// addCheck validates the check definition and starts the concrete check runner
// for its type (TTL, HTTP, TCP, gRPC, Docker, monitor, or alias). Existing
// runners for the same check ID are stopped and replaced. The caller must hold
// a.stateLock. service may be nil for node-level checks; when non-nil, the
// check inherits the service's name, tags and enterprise metadata.
func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, service *structs.NodeService, persist bool, token string, source configSource) error {
	if check.CheckID == "" {
		return fmt.Errorf("CheckID missing")
	}

	if chkType != nil {
		if err := chkType.Validate(); err != nil {
			return fmt.Errorf("Check is not valid: %v", err)
		}

		// Script checks must be explicitly enabled for the source that
		// registered them (local config vs. remote API call).
		if chkType.IsScript() {
			if source == ConfigSourceLocal && !a.config.EnableLocalScriptChecks {
				return fmt.Errorf("Scripts are disabled on this agent; to enable, configure 'enable_script_checks' or 'enable_local_script_checks' to true")
			}

			if source == ConfigSourceRemote && !a.config.EnableRemoteScriptChecks {
				return fmt.Errorf("Scripts are disabled on this agent from remote calls; to enable, configure 'enable_script_checks' to true")
			}
		}
	}

	if check.ServiceID != "" {
		// Service-level checks inherit identity fields from their service.
		check.ServiceName = service.Service
		check.ServiceTags = service.Tags
		check.EnterpriseMeta = service.EnterpriseMeta
	}

	// Check if already registered
	if chkType != nil {
		// Effective output size limit: per-check limit if set and smaller,
		// otherwise the agent-wide limit, otherwise the package default.
		maxOutputSize := a.config.CheckOutputMaxSize
		if maxOutputSize == 0 {
			maxOutputSize = checks.DefaultBufSize
		}
		if chkType.OutputMaxSize > 0 && maxOutputSize > chkType.OutputMaxSize {
			maxOutputSize = chkType.OutputMaxSize
		}

		// Get the address of the proxy for this service if it exists
		// Need its config to know whether we should reroute checks to it
		var proxy *structs.NodeService
		if service != nil {
			for _, svc := range a.State.Services(&service.EnterpriseMeta) {
				if svc.Proxy.DestinationServiceID == service.ID {
					proxy = svc
					break
				}
			}
		}

		statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeCritical)
		sid := check.CompoundServiceID()
		cid := check.CompoundCheckID()

		switch {
		case chkType.IsTTL():
			if existing, ok := a.checkTTLs[cid]; ok {
				existing.Stop()
				delete(a.checkTTLs, cid)
			}

			ttl := &checks.CheckTTL{
				Notify:        a.State,
				CheckID:       cid,
				ServiceID:     sid,
				TTL:           chkType.TTL,
				Logger:        a.logger,
				OutputMaxSize: maxOutputSize,
			}

			// Restore persisted state, if any
			if err := a.loadCheckState(check); err != nil {
				a.logger.Warn("failed restoring state for check",
					"check", cid.String(),
					"error", err,
				)
			}

			ttl.Start()
			a.checkTTLs[cid] = ttl

		case chkType.IsHTTP():
			if existing, ok := a.checkHTTPs[cid]; ok {
				existing.Stop()
				delete(a.checkHTTPs, cid)
			}
			// Clamp the polling interval to the supported minimum.
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)

			http := &checks.CheckHTTP{
				CheckID:         cid,
				ServiceID:       sid,
				HTTP:            chkType.HTTP,
				Header:          chkType.Header,
				Method:          chkType.Method,
				Body:            chkType.Body,
				Interval:        chkType.Interval,
				Timeout:         chkType.Timeout,
				Logger:          a.logger,
				OutputMaxSize:   maxOutputSize,
				TLSClientConfig: tlsClientConfig,
				StatusHandler:   statusHandler,
			}

			if proxy != nil && proxy.Proxy.Expose.Checks {
				port, err := a.listenerPortLocked(sid, cid)
				if err != nil {
					a.logger.Error("error exposing check",
						"check", cid.String(),
						"error", err,
					)
					return err
				}
				// Reroute the check through the proxy's own address so an
				// agent that cannot reach the service over loopback still
				// reaches it via the exposed listener port.
				http.ProxyHTTP = httpInjectAddr(http.HTTP, proxy.Address, port)
			}

			http.Start()
			a.checkHTTPs[cid] = http

		case chkType.IsTCP():
			if existing, ok := a.checkTCPs[cid]; ok {
				existing.Stop()
				delete(a.checkTCPs, cid)
			}
			// Clamp the polling interval to the supported minimum.
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			tcp := &checks.CheckTCP{
				CheckID:       cid,
				ServiceID:     sid,
				TCP:           chkType.TCP,
				Interval:      chkType.Interval,
				Timeout:       chkType.Timeout,
				Logger:        a.logger,
				StatusHandler: statusHandler,
			}
			tcp.Start()
			a.checkTCPs[cid] = tcp

		case chkType.IsGRPC():
			if existing, ok := a.checkGRPCs[cid]; ok {
				existing.Stop()
				delete(a.checkGRPCs, cid)
			}
			// Clamp the polling interval to the supported minimum.
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			// TLS config is only built when the check opts in to TLS.
			var tlsClientConfig *tls.Config
			if chkType.GRPCUseTLS {
				tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
			}

			grpc := &checks.CheckGRPC{
				CheckID:         cid,
				ServiceID:       sid,
				GRPC:            chkType.GRPC,
				Interval:        chkType.Interval,
				Timeout:         chkType.Timeout,
				Logger:          a.logger,
				TLSClientConfig: tlsClientConfig,
				StatusHandler:   statusHandler,
			}

			if proxy != nil && proxy.Proxy.Expose.Checks {
				port, err := a.listenerPortLocked(sid, cid)
				if err != nil {
					a.logger.Error("error exposing check",
						"check", cid.String(),
						"error", err,
					)
					return err
				}
				// Reroute the check through the proxy's own address so an
				// agent that cannot reach the service over loopback still
				// reaches it via the exposed listener port.
				grpc.ProxyGRPC = grpcInjectAddr(grpc.GRPC, proxy.Address, port)
			}

			grpc.Start()
			a.checkGRPCs[cid] = grpc

		case chkType.IsDocker():
			if existing, ok := a.checkDockers[cid]; ok {
				existing.Stop()
				delete(a.checkDockers, cid)
			}
			// Clamp the polling interval to the supported minimum.
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			// The docker client is created lazily on first use and shared by
			// all docker checks.
			if a.dockerClient == nil {
				dc, err := checks.NewDockerClient(os.Getenv("DOCKER_HOST"), int64(maxOutputSize))
				if err != nil {
					a.logger.Error("error creating docker client", "error", err)
					return err
				}
				a.logger.Debug("created docker client", "host", dc.Host())
				a.dockerClient = dc
			}

			dockerCheck := &checks.CheckDocker{
				CheckID:           cid,
				ServiceID:         sid,
				DockerContainerID: chkType.DockerContainerID,
				Shell:             chkType.Shell,
				ScriptArgs:        chkType.ScriptArgs,
				Interval:          chkType.Interval,
				Logger:            a.logger,
				Client:            a.dockerClient,
				StatusHandler:     statusHandler,
			}
			// NOTE(review): any previous runner for cid was already stopped
			// and deleted above, so prev should always be nil here — this
			// looks redundant; confirm before removing.
			if prev := a.checkDockers[cid]; prev != nil {
				prev.Stop()
			}
			dockerCheck.Start()
			a.checkDockers[cid] = dockerCheck

		case chkType.IsMonitor():
			if existing, ok := a.checkMonitors[cid]; ok {
				existing.Stop()
				delete(a.checkMonitors, cid)
			}
			// Clamp the polling interval to the supported minimum.
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			monitor := &checks.CheckMonitor{
				Notify:        a.State,
				CheckID:       cid,
				ServiceID:     sid,
				ScriptArgs:    chkType.ScriptArgs,
				Interval:      chkType.Interval,
				Timeout:       chkType.Timeout,
				Logger:        a.logger,
				OutputMaxSize: maxOutputSize,
				StatusHandler: statusHandler,
			}
			monitor.Start()
			a.checkMonitors[cid] = monitor

		case chkType.IsAlias():
			if existing, ok := a.checkAliases[cid]; ok {
				existing.Stop()
				delete(a.checkAliases, cid)
			}

			var rpcReq structs.NodeSpecificRequest
			rpcReq.Datacenter = a.config.Datacenter

			// The token to set is really important. The behavior below follows
			// the same behavior as anti-entropy: we use the user-specified token
			// if set (either on the service or check definition), otherwise
			// we use the "UserToken" on the agent. This is tested.
			rpcReq.Token = a.tokens.UserToken()
			if token != "" {
				rpcReq.Token = token
			}

			aliasServiceID := structs.NewServiceID(chkType.AliasService, &check.EnterpriseMeta)
			chkImpl := &checks.CheckAlias{
				Notify:         a.State,
				RPC:            a.delegate,
				RPCReq:         rpcReq,
				CheckID:        cid,
				Node:           chkType.AliasNode,
				ServiceID:      aliasServiceID,
				EnterpriseMeta: check.EnterpriseMeta,
			}
			chkImpl.Start()
			a.checkAliases[cid] = chkImpl

		default:
			return fmt.Errorf("Check type is not valid")
		}

		// Notify channel that watches for service state changes
		// This is a non-blocking send to avoid synchronizing on a large number of check updates
		s := a.State.ServiceState(sid)
		if s != nil && !s.Deleted {
			select {
			case s.WatchCh <- struct{}{}:
			default:
			}
		}

		// Record (or clear) the critical-deregistration timeout for this
		// check, clamped to the configured minimum.
		if chkType.DeregisterCriticalServiceAfter > 0 {
			timeout := chkType.DeregisterCriticalServiceAfter
			if timeout < a.config.CheckDeregisterIntervalMin {
				timeout = a.config.CheckDeregisterIntervalMin
				a.logger.Warn("check has deregister interval below minimum",
					"check", cid.String(),
					"minimum_interval", a.config.CheckDeregisterIntervalMin,
				)
			}
			a.checkReapAfter[cid] = timeout
		} else {
			delete(a.checkReapAfter, cid)
		}
	}

	return nil
}
// RemoveCheck is used to remove a health check.
// The agent will make a best effort to ensure it is deregistered
2019-12-10 02:26:41 +00:00
func ( a * Agent ) RemoveCheck ( checkID structs . CheckID , persist bool ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
return a . removeCheckLocked ( checkID , persist )
}
// removeCheckLocked is used to remove a health check.
// The agent will make a best effort to ensure it is deregistered
2019-12-10 02:26:41 +00:00
func ( a * Agent ) removeCheckLocked ( checkID structs . CheckID , persist bool ) error {
2015-01-26 16:06:49 +00:00
// Validate CheckID
2019-12-10 02:26:41 +00:00
if checkID . ID == "" {
2015-01-26 16:06:49 +00:00
return fmt . Errorf ( "CheckID missing" )
}
2019-09-26 02:55:52 +00:00
// Notify channel that watches for service state changes
// This is a non-blocking send to avoid synchronizing on a large number of check updates
2019-12-10 02:26:41 +00:00
var svcID structs . ServiceID
if c := a . State . Check ( checkID ) ; c != nil {
svcID = c . CompoundServiceID ( )
2019-09-26 02:55:52 +00:00
}
2019-12-10 02:26:41 +00:00
2019-09-26 02:55:52 +00:00
s := a . State . ServiceState ( svcID )
if s != nil && ! s . Deleted {
select {
case s . WatchCh <- struct { } { } :
default :
}
}
// Delete port from allocated port set
// If checks weren't being exposed then this is a no-op
2019-12-10 02:26:41 +00:00
portKey := listenerPortKey ( svcID , checkID )
2019-09-26 02:55:52 +00:00
delete ( a . exposedPorts , portKey )
2017-07-18 21:54:20 +00:00
a . cancelCheckMonitors ( checkID )
2019-03-04 14:34:05 +00:00
a . State . RemoveCheck ( checkID )
2017-07-18 21:54:20 +00:00
if persist {
if err := a . purgeCheck ( checkID ) ; err != nil {
return err
}
if err := a . purgeCheckState ( checkID ) ; err != nil {
return err
}
}
2019-09-26 02:55:52 +00:00
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "removed check" , "check" , checkID . String ( ) )
2017-07-18 21:54:20 +00:00
return nil
}
2020-04-01 20:52:23 +00:00
// ServiceHTTPBasedChecks returns HTTP and GRPC based Checks
// for the given serviceID
2019-12-10 02:26:41 +00:00
func ( a * Agent ) ServiceHTTPBasedChecks ( serviceID structs . ServiceID ) [ ] structs . CheckType {
2019-09-26 02:55:52 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
var chkTypes = make ( [ ] structs . CheckType , 0 )
for _ , c := range a . checkHTTPs {
if c . ServiceID == serviceID {
chkTypes = append ( chkTypes , c . CheckType ( ) )
}
}
for _ , c := range a . checkGRPCs {
if c . ServiceID == serviceID {
chkTypes = append ( chkTypes , c . CheckType ( ) )
}
}
return chkTypes
}
2020-04-01 20:52:23 +00:00
// AdvertiseAddrLAN returns the AdvertiseAddrLAN config value
2019-09-26 02:55:52 +00:00
func ( a * Agent ) AdvertiseAddrLAN ( ) string {
return a . config . AdvertiseAddrLAN . String ( )
}
2018-07-12 11:57:10 +00:00
// resolveProxyCheckAddress returns the best address to use for a TCP check of
// the proxy's public listener. It expects the input to already have default
// values populated by applyProxyConfigDefaults. It may return an empty string
// indicating that the TCP check should not be created at all.
//
// By default this uses the proxy's bind address which in turn defaults to the
// agent's bind address. If the proxy bind address ends up being 0.0.0.0 we have
// to assume the agent can dial it over loopback which is usually true.
//
// In some topologies such as proxy being in a different container, the IP the
// agent used to dial proxy over a local bridge might not be the same as the
// container's public routable IP address so we allow a manual override of the
// check address in config "tcp_check_address" too.
//
// Finally the TCP check can be disabled by another manual override
// "disable_tcp_check" in cases where the agent will never be able to dial the
// proxy directly for some reason.
func (a *Agent) resolveProxyCheckAddress(proxyCfg map[string]interface{}) string {
	// Explicitly disabled: no check address at all. A failed type
	// assertion yields false, same as an absent key.
	if disabled, _ := proxyCfg["disable_tcp_check"].(bool); disabled {
		return ""
	}

	// A user-supplied override wins over anything derived.
	if override, _ := proxyCfg["tcp_check_address"].(string); override != "" {
		return override
	}

	// Use the bind address unless it is empty or a wildcard, in which case
	// we assume loopback is diallable.
	bindAddr, _ := proxyCfg["bind_address"].(string)
	switch bindAddr {
	case "", "0.0.0.0", "[::]":
		return "127.0.0.1"
	}
	return bindAddr
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) cancelCheckMonitors ( checkID structs . CheckID ) {
2014-01-30 21:39:02 +00:00
// Stop any monitors
2016-08-16 07:05:55 +00:00
delete ( a . checkReapAfter , checkID )
2014-01-30 21:39:02 +00:00
if check , ok := a . checkMonitors [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkMonitors , checkID )
}
2015-01-12 22:34:39 +00:00
if check , ok := a . checkHTTPs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkHTTPs , checkID )
}
2015-07-23 11:45:08 +00:00
if check , ok := a . checkTCPs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkTCPs , checkID )
}
2017-12-27 04:35:22 +00:00
if check , ok := a . checkGRPCs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkGRPCs , checkID )
}
2014-01-30 21:39:02 +00:00
if check , ok := a . checkTTLs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkTTLs , checkID )
}
2017-07-18 18:50:37 +00:00
if check , ok := a . checkDockers [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkDockers , checkID )
}
2014-01-30 21:39:02 +00:00
}
2016-08-16 07:05:55 +00:00
// updateTTLCheck is used to update the status of a TTL check via the Agent API.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) updateTTLCheck ( checkID structs . CheckID , status , output string ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
2014-01-30 21:39:02 +00:00
2016-08-16 07:05:55 +00:00
// Grab the TTL check.
2014-01-30 21:39:02 +00:00
check , ok := a . checkTTLs [ checkID ]
if ! ok {
2019-12-10 02:26:41 +00:00
return fmt . Errorf ( "CheckID %q does not have associated TTL" , checkID . String ( ) )
2014-01-30 21:39:02 +00:00
}
2016-08-16 07:05:55 +00:00
// Set the status through CheckTTL to reset the TTL.
2019-06-26 15:43:25 +00:00
outputTruncated := check . SetStatus ( status , output )
2015-06-05 23:17:07 +00:00
2016-08-16 07:05:55 +00:00
// We don't write any files in dev mode so bail here.
2018-06-06 20:04:19 +00:00
if a . config . DataDir == "" {
2015-11-29 04:40:05 +00:00
return nil
}
2016-08-16 07:05:55 +00:00
// Persist the state so the TTL check can come up in a good state after
// an agent restart, especially with long TTL values.
2019-06-26 15:43:25 +00:00
if err := a . persistCheckState ( check , status , outputTruncated ) ; err != nil {
2019-12-10 02:26:41 +00:00
return fmt . Errorf ( "failed persisting state for check %q: %s" , checkID . String ( ) , err )
2015-06-05 23:17:07 +00:00
}
return nil
}
// persistCheckState is used to record the check status into the data dir.
// This allows the state to be restored on a later agent start. Currently
// only useful for TTL based checks.
2017-10-25 09:18:07 +00:00
func ( a * Agent ) persistCheckState ( check * checks . CheckTTL , status , output string ) error {
2015-06-05 23:17:07 +00:00
// Create the persisted state
state := persistedCheckState {
2019-12-10 02:26:41 +00:00
CheckID : check . CheckID . ID ,
Status : status ,
Output : output ,
Expires : time . Now ( ) . Add ( check . TTL ) . Unix ( ) ,
EnterpriseMeta : check . CheckID . EnterpriseMeta ,
2015-06-05 23:17:07 +00:00
}
// Encode the state
buf , err := json . Marshal ( state )
if err != nil {
return err
}
// Create the state dir if it doesn't exist
dir := filepath . Join ( a . config . DataDir , checkStateDir )
if err := os . MkdirAll ( dir , 0700 ) ; err != nil {
return fmt . Errorf ( "failed creating check state dir %q: %s" , dir , err )
}
// Write the state to the file
2019-12-10 02:26:41 +00:00
file := filepath . Join ( dir , check . CheckID . StringHash ( ) )
2016-11-07 18:51:03 +00:00
// Create temp file in same dir, to make more likely atomic
2016-08-03 15:32:21 +00:00
tempFile := file + ".tmp"
2016-11-07 20:24:31 +00:00
// persistCheckState is called frequently, so don't use writeFileAtomic to avoid calling fsync here
2016-08-03 15:32:21 +00:00
if err := ioutil . WriteFile ( tempFile , buf , 0600 ) ; err != nil {
return fmt . Errorf ( "failed writing temp file %q: %s" , tempFile , err )
}
if err := os . Rename ( tempFile , file ) ; err != nil {
return fmt . Errorf ( "failed to rename temp file from %q to %q: %s" , tempFile , file , err )
2015-06-05 23:17:07 +00:00
}
return nil
}
2015-06-08 16:35:10 +00:00
// loadCheckState is used to restore the persisted state of a check.
func ( a * Agent ) loadCheckState ( check * structs . HealthCheck ) error {
2019-12-10 02:26:41 +00:00
cid := check . CompoundCheckID ( )
2015-06-05 23:17:07 +00:00
// Try to read the persisted state for this check
2019-12-10 02:26:41 +00:00
file := filepath . Join ( a . config . DataDir , checkStateDir , cid . StringHash ( ) )
2015-06-05 23:17:07 +00:00
buf , err := ioutil . ReadFile ( file )
if err != nil {
if os . IsNotExist ( err ) {
return nil
}
return fmt . Errorf ( "failed reading file %q: %s" , file , err )
}
// Decode the state data
var p persistedCheckState
if err := json . Unmarshal ( buf , & p ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed decoding check state" , "error" , err )
2019-12-10 02:26:41 +00:00
return a . purgeCheckState ( cid )
2015-06-05 23:17:07 +00:00
}
// Check if the state has expired
2015-06-05 23:45:05 +00:00
if time . Now ( ) . Unix ( ) >= p . Expires {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "check state expired, not restoring" , "check" , cid . String ( ) )
2019-12-10 02:26:41 +00:00
return a . purgeCheckState ( cid )
2015-06-05 23:17:07 +00:00
}
// Restore the fields from the state
check . Output = p . Output
check . Status = p . Status
2014-01-30 21:39:02 +00:00
return nil
}
2014-02-24 00:42:39 +00:00
2015-06-05 23:57:14 +00:00
// purgeCheckState is used to purge the state of a check from the data dir
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeCheckState ( checkID structs . CheckID ) error {
file := filepath . Join ( a . config . DataDir , checkStateDir , checkID . StringHash ( ) )
2015-06-05 23:57:14 +00:00
err := os . Remove ( file )
if os . IsNotExist ( err ) {
return nil
}
return err
}
2014-02-24 00:42:39 +00:00
// Stats is used to get various debugging state from the sub-systems
func ( a * Agent ) Stats ( ) map [ string ] map [ string ] string {
2017-05-15 14:05:17 +00:00
stats := a . delegate . Stats ( )
2014-02-24 00:42:39 +00:00
stats [ "agent" ] = map [ string ] string {
2017-08-28 12:17:12 +00:00
"check_monitors" : strconv . Itoa ( len ( a . checkMonitors ) ) ,
"check_ttls" : strconv . Itoa ( len ( a . checkTTLs ) ) ,
}
2017-08-28 12:17:13 +00:00
for k , v := range a . State . Stats ( ) {
2017-08-28 12:17:12 +00:00
stats [ "agent" ] [ k ] = v
2014-02-24 00:42:39 +00:00
}
2014-06-06 21:40:22 +00:00
revision := a . config . Revision
if len ( revision ) > 8 {
revision = revision [ : 8 ]
}
stats [ "build" ] = map [ string ] string {
"revision" : revision ,
"version" : a . config . Version ,
"prerelease" : a . config . VersionPrerelease ,
}
2014-02-24 00:42:39 +00:00
return stats
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
// storePid is used to write out our PID to a file if necessary
2014-05-06 16:57:53 +00:00
func ( a * Agent ) storePid ( ) error {
2014-05-06 19:43:33 +00:00
// Quit fast if no pidfile
2014-05-06 03:29:50 +00:00
pidPath := a . config . PidFile
2014-05-06 19:43:33 +00:00
if pidPath == "" {
return nil
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
// Open the PID file
pidFile , err := os . OpenFile ( pidPath , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , 0666 )
if err != nil {
return fmt . Errorf ( "Could not open pid file: %v" , err )
2014-05-06 03:29:50 +00:00
}
2014-05-06 19:43:33 +00:00
defer pidFile . Close ( )
2014-05-06 16:57:53 +00:00
2014-05-06 19:43:33 +00:00
// Write out the PID
pid := os . Getpid ( )
_ , err = pidFile . WriteString ( fmt . Sprintf ( "%d" , pid ) )
if err != nil {
return fmt . Errorf ( "Could not write to pid file: %s" , err )
}
2014-05-06 16:57:53 +00:00
return nil
2014-05-06 03:29:50 +00:00
}
2014-05-06 19:43:33 +00:00
// deletePid is used to delete our PID on exit
2014-05-06 16:57:53 +00:00
func ( a * Agent ) deletePid ( ) error {
2014-05-06 19:43:33 +00:00
// Quit fast if no pidfile
2014-05-06 03:29:50 +00:00
pidPath := a . config . PidFile
2014-05-06 19:43:33 +00:00
if pidPath == "" {
return nil
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
stat , err := os . Stat ( pidPath )
if err != nil {
return fmt . Errorf ( "Could not remove pid file: %s" , err )
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
if stat . IsDir ( ) {
return fmt . Errorf ( "Specified pid file path is directory" )
2014-05-06 03:29:50 +00:00
}
2014-05-06 16:57:53 +00:00
2014-05-06 19:43:33 +00:00
err = os . Remove ( pidPath )
if err != nil {
return fmt . Errorf ( "Could not remove pid file: %s" , err )
}
2014-05-06 16:57:53 +00:00
return nil
2014-05-06 03:29:50 +00:00
}
2014-11-26 07:58:02 +00:00
2015-01-08 02:05:46 +00:00
// loadServices will load service definitions from configuration and persisted
// definitions on disk, and load them into the local agent.
//
// Config-sourced services (and their sidecars) are registered first; services
// persisted via the API are then restored from the services dir, unless a
// config-sourced service with the same ID already exists, in which case the
// persisted file is purged so config wins. Orphaned persisted service-config
// files are cleaned up at the end. The caller must hold a.stateLock. snap is
// a snapshot of current check state used by addServiceLocked.
func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckID]*structs.HealthCheck) error {
	// Load any persisted service configs so we can feed those into the initial
	// registrations below.
	persistedServiceConfigs, err := a.readPersistedServiceConfigs()
	if err != nil {
		return err
	}

	// Register the services from config
	for _, service := range conf.Services {
		ns := service.NodeService()
		chkTypes, err := service.CheckTypes()
		if err != nil {
			return fmt.Errorf("Failed to validate checks for service %q: %v", service.Name, err)
		}

		// Grab and validate sidecar if there is one too
		sidecar, sidecarChecks, sidecarToken, err := a.sidecarServiceFromNodeService(ns, service.Token)
		if err != nil {
			return fmt.Errorf("Failed to validate sidecar for service %q: %v", service.Name, err)
		}
		// Remove sidecar from NodeService now it's done it's job it's just a config
		// syntax sugar and shouldn't be persisted in local or server state.
		ns.Connect.SidecarService = nil

		sid := ns.CompoundServiceID()
		err = a.addServiceLocked(&addServiceRequest{
			service:               ns,
			chkTypes:              chkTypes,
			previousDefaults:      persistedServiceConfigs[sid],
			waitForCentralConfig:  false, // exclusively use cached values
			persist:               false, // don't rewrite the file with the same data we just read
			persistServiceConfig:  false, // don't rewrite the file with the same data we just read
			token:                 service.Token,
			replaceExistingChecks: false, // do default behavior
			source:                ConfigSourceLocal,
		}, snap)
		if err != nil {
			return fmt.Errorf("Failed to register service %q: %v", service.Name, err)
		}

		// If there is a sidecar service, register that too.
		if sidecar != nil {
			sidecarServiceID := sidecar.CompoundServiceID()
			err = a.addServiceLocked(&addServiceRequest{
				service:               sidecar,
				chkTypes:              sidecarChecks,
				previousDefaults:      persistedServiceConfigs[sidecarServiceID],
				waitForCentralConfig:  false, // exclusively use cached values
				persist:               false, // don't rewrite the file with the same data we just read
				persistServiceConfig:  false, // don't rewrite the file with the same data we just read
				token:                 sidecarToken,
				replaceExistingChecks: false, // do default behavior
				source:                ConfigSourceLocal,
			}, snap)
			if err != nil {
				return fmt.Errorf("Failed to register sidecar for service %q: %v", service.Name, err)
			}
		}
	}

	// Load any persisted services
	svcDir := filepath.Join(a.config.DataDir, servicesDir)
	files, err := ioutil.ReadDir(svcDir)
	if err != nil {
		// No services dir means nothing was ever persisted; not an error.
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("Failed reading services dir %q: %s", svcDir, err)
	}
	for _, fi := range files {
		// Skip all dirs
		if fi.IsDir() {
			continue
		}

		// Skip all partially written temporary files
		if strings.HasSuffix(fi.Name(), "tmp") {
			a.logger.Warn("Ignoring temporary service file", "file", fi.Name())
			continue
		}

		// Read the contents into a buffer
		file := filepath.Join(svcDir, fi.Name())
		buf, err := ioutil.ReadFile(file)
		if err != nil {
			return fmt.Errorf("failed reading service file %q: %s", file, err)
		}

		// Try decoding the service definition
		var p persistedService
		if err := json.Unmarshal(buf, &p); err != nil {
			// Backwards-compatibility for pre-0.5.1 persisted services
			if err := json.Unmarshal(buf, &p.Service); err != nil {
				// Undecodable files are logged and skipped, not fatal.
				a.logger.Error("Failed decoding service file",
					"file", file,
					"error", err,
				)
				continue
			}
		}
		serviceID := p.Service.CompoundServiceID()

		// A persisted service with an unrecognized source cannot be
		// trusted; purge both the definition and its config.
		source, ok := ConfigSourceFromName(p.Source)
		if !ok {
			a.logger.Warn("service exists with invalid source, purging",
				"service", serviceID.String(),
				"source", p.Source,
			)
			if err := a.purgeService(serviceID); err != nil {
				return fmt.Errorf("failed purging service %q: %s", serviceID, err)
			}
			if err := a.purgeServiceConfig(serviceID); err != nil {
				return fmt.Errorf("failed purging service config %q: %s", serviceID, err)
			}
			continue
		}

		if a.State.Service(serviceID) != nil {
			// Purge previously persisted service. This allows config to be
			// preferred over services persisted from the API.
			a.logger.Debug("service exists, not restoring from file",
				"service", serviceID.String(),
				"file", file,
			)
			if err := a.purgeService(serviceID); err != nil {
				return fmt.Errorf("failed purging service %q: %s", serviceID.String(), err)
			}
			if err := a.purgeServiceConfig(serviceID); err != nil {
				return fmt.Errorf("failed purging service config %q: %s", serviceID.String(), err)
			}
		} else {
			a.logger.Debug("restored service definition from file",
				"service", serviceID.String(),
				"file", file,
			)
			err = a.addServiceLocked(&addServiceRequest{
				service:               p.Service,
				chkTypes:              nil,
				previousDefaults:      persistedServiceConfigs[serviceID],
				waitForCentralConfig:  false, // exclusively use cached values
				persist:               false, // don't rewrite the file with the same data we just read
				persistServiceConfig:  false, // don't rewrite the file with the same data we just read
				token:                 p.Token,
				replaceExistingChecks: false, // do default behavior
				source:                source,
			}, snap)
			if err != nil {
				return fmt.Errorf("failed adding service %q: %s", serviceID, err)
			}
		}
	}

	// Drop persisted service-config entries whose service no longer exists.
	for serviceID := range persistedServiceConfigs {
		if a.State.Service(serviceID) == nil {
			// This can be cleaned up now.
			if err := a.purgeServiceConfig(serviceID); err != nil {
				return fmt.Errorf("failed purging service config %q: %s", serviceID, err)
			}
		}
	}

	return nil
}
2017-08-30 10:25:49 +00:00
// unloadServices will deregister all services.
2015-01-08 02:05:46 +00:00
func ( a * Agent ) unloadServices ( ) error {
2019-12-10 02:26:41 +00:00
for id := range a . State . Services ( structs . WildcardEnterpriseMeta ( ) ) {
2019-03-04 14:34:05 +00:00
if err := a . removeServiceLocked ( id , false ) ; err != nil {
2017-08-28 12:17:11 +00:00
return fmt . Errorf ( "Failed deregistering service '%s': %v" , id , err )
2014-11-26 07:58:02 +00:00
}
}
2015-01-08 02:05:46 +00:00
return nil
}
// loadChecks loads check definitions and/or persisted check definitions from
// disk and re-registers them with the local agent.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) loadChecks ( conf * config . RuntimeConfig , snap map [ structs . CheckID ] * structs . HealthCheck ) error {
2014-11-26 07:58:02 +00:00
// Register the checks from config
for _ , check := range conf . Checks {
health := check . HealthCheck ( conf . NodeName )
2019-07-17 19:06:50 +00:00
// Restore the fields from the snapshot.
2019-12-10 02:26:41 +00:00
if prev , ok := snap [ health . CompoundCheckID ( ) ] ; ok {
2019-07-17 19:06:50 +00:00
health . Output = prev . Output
health . Status = prev . Status
}
2017-05-15 19:49:13 +00:00
chkType := check . CheckType ( )
2019-03-04 14:34:05 +00:00
if err := a . addCheckLocked ( health , chkType , false , check . Token , ConfigSourceLocal ) ; err != nil {
2014-11-26 07:58:02 +00:00
return fmt . Errorf ( "Failed to register check '%s': %v %v" , check . Name , err , check )
}
}
// Load any persisted checks
2015-01-08 05:24:47 +00:00
checkDir := filepath . Join ( a . config . DataDir , checksDir )
2015-06-04 21:33:30 +00:00
files , err := ioutil . ReadDir ( checkDir )
if err != nil {
if os . IsNotExist ( err ) {
return nil
}
return fmt . Errorf ( "Failed reading checks dir %q: %s" , checkDir , err )
2014-11-26 07:58:02 +00:00
}
2015-06-04 21:33:30 +00:00
for _ , fi := range files {
// Ignore dirs - we only care about the check definition files
if fi . IsDir ( ) {
continue
}
2014-11-26 07:58:02 +00:00
2015-06-04 21:33:30 +00:00
// Read the contents into a buffer
2019-09-24 15:04:48 +00:00
file := filepath . Join ( checkDir , fi . Name ( ) )
buf , err := ioutil . ReadFile ( file )
2015-01-08 05:24:47 +00:00
if err != nil {
2015-06-04 21:33:30 +00:00
return fmt . Errorf ( "failed reading check file %q: %s" , file , err )
2015-01-08 05:24:47 +00:00
}
2015-06-04 21:33:30 +00:00
// Decode the check
2015-01-08 05:24:47 +00:00
var p persistedCheck
2015-06-04 21:33:30 +00:00
if err := json . Unmarshal ( buf , & p ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "Failed decoding check file" ,
"file" , file ,
"error" , err ,
)
2018-01-19 22:07:36 +00:00
continue
2015-01-08 05:24:47 +00:00
}
2019-12-10 02:26:41 +00:00
checkID := p . Check . CompoundCheckID ( )
2015-01-08 05:24:47 +00:00
2019-09-24 15:04:48 +00:00
source , ok := ConfigSourceFromName ( p . Source )
if ! ok {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "check exists with invalid source, purging" ,
"check" , checkID . String ( ) ,
"source" , p . Source ,
)
2019-09-24 15:04:48 +00:00
if err := a . purgeCheck ( checkID ) ; err != nil {
return fmt . Errorf ( "failed purging check %q: %s" , checkID , err )
}
continue
}
2017-08-28 12:17:13 +00:00
if a . State . Check ( checkID ) != nil {
2015-01-08 05:24:47 +00:00
// Purge previously persisted check. This allows config to be
// preferred over persisted checks from the API.
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "check exists, not restoring from file" ,
"check" , checkID . String ( ) ,
"file" , file ,
)
2015-06-04 21:33:30 +00:00
if err := a . purgeCheck ( checkID ) ; err != nil {
return fmt . Errorf ( "Failed purging check %q: %s" , checkID , err )
}
2015-01-08 05:24:47 +00:00
} else {
// Default check to critical to avoid placing potentially unhealthy
// services into the active pool
2017-04-19 23:00:11 +00:00
p . Check . Status = api . HealthCritical
2015-01-08 05:24:47 +00:00
2019-07-17 19:06:50 +00:00
// Restore the fields from the snapshot.
2019-12-10 02:26:41 +00:00
if prev , ok := snap [ p . Check . CompoundCheckID ( ) ] ; ok {
2019-07-17 19:06:50 +00:00
p . Check . Output = prev . Output
p . Check . Status = prev . Status
}
2019-09-24 15:04:48 +00:00
if err := a . addCheckLocked ( p . Check , p . ChkType , false , p . Token , source ) ; err != nil {
2015-03-11 23:13:19 +00:00
// Purge the check if it is unable to be restored.
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Failed to restore check" ,
"check" , checkID . String ( ) ,
"error" , err ,
)
2015-06-04 21:33:30 +00:00
if err := a . purgeCheck ( checkID ) ; err != nil {
return fmt . Errorf ( "Failed purging check %q: %s" , checkID , err )
}
2015-03-11 23:13:19 +00:00
}
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "restored health check from file" ,
"check" , p . Check . CheckID ,
"file" , file ,
)
2015-01-08 05:24:47 +00:00
}
2015-06-04 21:33:30 +00:00
}
2015-01-08 05:24:47 +00:00
2015-06-04 21:33:30 +00:00
return nil
2014-11-26 07:58:02 +00:00
}
2015-01-08 02:05:46 +00:00
// unloadChecks will deregister all checks known to the local agent.
func ( a * Agent ) unloadChecks ( ) error {
2019-12-10 02:26:41 +00:00
for id := range a . State . Checks ( structs . WildcardEnterpriseMeta ( ) ) {
2019-03-04 14:34:05 +00:00
if err := a . removeCheckLocked ( id , false ) ; err != nil {
2017-08-28 12:17:11 +00:00
return fmt . Errorf ( "Failed deregistering check '%s': %s" , id , err )
2015-01-08 02:05:46 +00:00
}
}
return nil
}
2015-01-15 08:16:34 +00:00
2019-02-27 19:28:31 +00:00
// persistedTokens is the JSON layout of the ACL tokens file stored under the
// agent's data dir (see getPersistedTokens, which reads it when token
// persistence is enabled). Each field holds one of the agent's ACL tokens;
// empty fields are omitted from the file.
type persistedTokens struct {
	Replication string `json:"replication,omitempty"`
	AgentMaster string `json:"agent_master,omitempty"`
	Default     string `json:"default,omitempty"`
	Agent       string `json:"agent,omitempty"`
}
// getPersistedTokens reads previously persisted ACL tokens from the tokens
// file in the data dir. It returns an empty (non-nil) persistedTokens when
// persistence is disabled or the file does not exist.
func (a *Agent) getPersistedTokens() (*persistedTokens, error) {
	tokens := &persistedTokens{}
	if !a.config.ACLEnableTokenPersistence {
		return tokens, nil
	}

	a.persistedTokensLock.RLock()
	defer a.persistedTokensLock.RUnlock()

	tokensFullPath := filepath.Join(a.config.DataDir, tokensPath)
	buf, err := ioutil.ReadFile(tokensFullPath)
	switch {
	case os.IsNotExist(err):
		// non-existence is not an error we care about
		return tokens, nil
	case err != nil:
		return tokens, fmt.Errorf("failed reading tokens file %q: %s", tokensFullPath, err)
	}

	if err := json.Unmarshal(buf, tokens); err != nil {
		return tokens, fmt.Errorf("failed to decode tokens file %q: %s", tokensFullPath, err)
	}
	return tokens, nil
}
2020-04-02 07:59:23 +00:00
// CheckSecurity performs security checks on the Consul configuration.
// It might return an error if the configuration is considered too dangerous.
func (a *Agent) CheckSecurity(conf *config.RuntimeConfig) error {
	// Remote script checks without ACLs and without an HTTP write
	// allow-list expose the agent to remote code execution.
	dangerous := conf.EnableRemoteScriptChecks &&
		!conf.ACLsEnabled &&
		len(conf.AllowWriteHTTPFrom) == 0
	if dangerous {
		err := fmt.Errorf("using enable-script-checks without ACLs and without allow_write_http_from is DANGEROUS, use enable-local-script-checks instead, see https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations/")
		a.logger.Error("[SECURITY] issue", "error", err)
		// TODO: return the error in future Consul versions
	}
	return nil
}
2019-02-27 19:28:31 +00:00
func ( a * Agent ) loadTokens ( conf * config . RuntimeConfig ) error {
persistedTokens , persistenceErr := a . getPersistedTokens ( )
if persistenceErr != nil {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "unable to load persisted tokens" , "error" , persistenceErr )
2019-02-27 19:28:31 +00:00
}
if persistedTokens . Default != "" {
a . tokens . UpdateUserToken ( persistedTokens . Default , token . TokenSourceAPI )
if conf . ACLToken != "" {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "\"default\" token present in both the configuration and persisted token store, using the persisted token" )
2019-02-27 19:28:31 +00:00
}
} else {
a . tokens . UpdateUserToken ( conf . ACLToken , token . TokenSourceConfig )
}
if persistedTokens . Agent != "" {
a . tokens . UpdateAgentToken ( persistedTokens . Agent , token . TokenSourceAPI )
if conf . ACLAgentToken != "" {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "\"agent\" token present in both the configuration and persisted token store, using the persisted token" )
2019-02-27 19:28:31 +00:00
}
} else {
a . tokens . UpdateAgentToken ( conf . ACLAgentToken , token . TokenSourceConfig )
}
if persistedTokens . AgentMaster != "" {
a . tokens . UpdateAgentMasterToken ( persistedTokens . AgentMaster , token . TokenSourceAPI )
if conf . ACLAgentMasterToken != "" {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "\"agent_master\" token present in both the configuration and persisted token store, using the persisted token" )
2019-02-27 19:28:31 +00:00
}
} else {
a . tokens . UpdateAgentMasterToken ( conf . ACLAgentMasterToken , token . TokenSourceConfig )
}
if persistedTokens . Replication != "" {
a . tokens . UpdateReplicationToken ( persistedTokens . Replication , token . TokenSourceAPI )
if conf . ACLReplicationToken != "" {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "\"replication\" token present in both the configuration and persisted token store, using the persisted token" )
2019-02-27 19:28:31 +00:00
}
} else {
a . tokens . UpdateReplicationToken ( conf . ACLReplicationToken , token . TokenSourceConfig )
}
return persistenceErr
}
2015-02-17 20:00:04 +00:00
// snapshotCheckState is used to snapshot the current state of the health
// checks. This is done before we reload our checks, so that we can properly
// restore into the same state.
func (a *Agent) snapshotCheckState() map[structs.CheckID]*structs.HealthCheck {
	// The wildcard enterprise meta asks local state for every known check.
	return a.State.Checks(structs.WildcardEnterpriseMeta())
}
2017-01-11 19:41:12 +00:00
// loadMetadata loads node metadata fields from the agent config and
2017-01-05 22:10:26 +00:00
// updates them on the local agent.
2017-09-25 18:40:42 +00:00
func ( a * Agent ) loadMetadata ( conf * config . RuntimeConfig ) error {
2017-08-28 12:17:12 +00:00
meta := map [ string ] string { }
for k , v := range conf . NodeMeta {
meta [ k ] = v
2017-01-11 19:41:12 +00:00
}
2017-08-28 12:17:12 +00:00
meta [ structs . MetaSegmentKey ] = conf . SegmentName
2017-08-28 12:17:13 +00:00
return a . State . LoadMetadata ( meta )
2017-01-11 19:41:12 +00:00
}
2017-01-05 22:10:26 +00:00
// unloadMetadata resets the local metadata state
2017-01-11 19:41:12 +00:00
func ( a * Agent ) unloadMetadata ( ) {
2017-08-28 12:17:13 +00:00
a . State . UnloadMetadata ( )
2017-01-05 22:10:26 +00:00
}
2015-01-15 20:20:57 +00:00
// serviceMaintCheckID returns the ID of a given service's maintenance check
2019-12-10 02:26:41 +00:00
func serviceMaintCheckID ( serviceID structs . ServiceID ) structs . CheckID {
2020-04-15 16:03:29 +00:00
cid := types . CheckID ( structs . ServiceMaintPrefix + serviceID . ID )
return structs . NewCheckID ( cid , & serviceID . EnterpriseMeta )
2015-01-15 20:20:57 +00:00
}
2015-01-15 08:25:36 +00:00
// EnableServiceMaintenance will register a false health check against the given
// service ID with critical status. This will exclude the service from queries.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) EnableServiceMaintenance ( serviceID structs . ServiceID , reason , token string ) error {
service := a . State . Service ( serviceID )
if service == nil {
return fmt . Errorf ( "No service registered with ID %q" , serviceID . String ( ) )
2015-01-15 08:16:34 +00:00
}
2015-01-15 20:20:57 +00:00
// Check if maintenance mode is not already enabled
checkID := serviceMaintCheckID ( serviceID )
2019-12-10 02:26:41 +00:00
if a . State . Check ( checkID ) != nil {
2015-01-15 18:51:00 +00:00
return nil
2015-01-15 08:16:34 +00:00
}
2015-01-21 20:21:57 +00:00
// Use default notes if no reason provided
if reason == "" {
2015-01-21 22:45:09 +00:00
reason = defaultServiceMaintReason
2015-01-21 20:21:57 +00:00
}
2015-01-15 08:16:34 +00:00
// Create and register the critical health check
check := & structs . HealthCheck {
2019-12-10 02:26:41 +00:00
Node : a . config . NodeName ,
CheckID : checkID . ID ,
Name : "Service Maintenance Mode" ,
Notes : reason ,
ServiceID : service . ID ,
ServiceName : service . Service ,
Status : api . HealthCritical ,
Type : "maintenance" ,
EnterpriseMeta : checkID . EnterpriseMeta ,
2015-01-15 08:16:34 +00:00
}
2018-10-11 12:22:11 +00:00
a . AddCheck ( check , nil , true , token , ConfigSourceLocal )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Service entered maintenance mode" , "service" , serviceID . String ( ) )
2015-01-15 08:16:34 +00:00
return nil
}
2015-01-15 08:25:36 +00:00
// DisableServiceMaintenance will deregister the fake maintenance mode check
// if the service has been marked as in maintenance.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) DisableServiceMaintenance ( serviceID structs . ServiceID ) error {
if a . State . Service ( serviceID ) == nil {
return fmt . Errorf ( "No service registered with ID %q" , serviceID . String ( ) )
2015-01-15 08:16:34 +00:00
}
2015-01-15 20:20:57 +00:00
// Check if maintenance mode is enabled
checkID := serviceMaintCheckID ( serviceID )
2019-12-10 02:26:41 +00:00
if a . State . Check ( checkID ) == nil {
// maintenance mode is not enabled
2015-01-15 20:20:57 +00:00
return nil
}
2015-01-15 08:16:34 +00:00
// Deregister the maintenance check
2015-01-15 20:20:57 +00:00
a . RemoveCheck ( checkID , true )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Service left maintenance mode" , "service" , serviceID . String ( ) )
2015-01-15 20:20:57 +00:00
2015-01-15 08:16:34 +00:00
return nil
}
2015-01-15 19:20:22 +00:00
// EnableNodeMaintenance places a node into maintenance mode.
2015-09-10 18:43:59 +00:00
func ( a * Agent ) EnableNodeMaintenance ( reason , token string ) {
2015-01-15 19:20:22 +00:00
// Ensure node maintenance is not already enabled
2019-12-10 02:26:41 +00:00
if a . State . Check ( structs . NodeMaintCheckID ) != nil {
2015-01-15 19:20:22 +00:00
return
}
2015-01-21 20:21:57 +00:00
// Use a default notes value
if reason == "" {
2015-01-21 22:45:09 +00:00
reason = defaultNodeMaintReason
2015-01-21 20:21:57 +00:00
}
2015-01-15 19:20:22 +00:00
// Create and register the node maintenance check
check := & structs . HealthCheck {
Node : a . config . NodeName ,
2016-11-29 21:15:20 +00:00
CheckID : structs . NodeMaint ,
2015-01-15 19:20:22 +00:00
Name : "Node Maintenance Mode" ,
2015-01-21 20:21:57 +00:00
Notes : reason ,
2017-04-19 23:00:11 +00:00
Status : api . HealthCritical ,
2019-10-17 18:33:11 +00:00
Type : "maintenance" ,
2015-01-15 19:20:22 +00:00
}
2018-10-11 12:22:11 +00:00
a . AddCheck ( check , nil , true , token , ConfigSourceLocal )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Node entered maintenance mode" )
2015-01-15 19:20:22 +00:00
}
// DisableNodeMaintenance removes a node from maintenance mode
func ( a * Agent ) DisableNodeMaintenance ( ) {
2019-12-10 02:26:41 +00:00
if a . State . Check ( structs . NodeMaintCheckID ) == nil {
2015-01-15 20:20:57 +00:00
return
}
2019-12-10 02:26:41 +00:00
a . RemoveCheck ( structs . NodeMaintCheckID , true )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Node left maintenance mode" )
2015-01-15 19:20:22 +00:00
}
2015-11-12 17:19:33 +00:00
2018-04-08 21:28:29 +00:00
func ( a * Agent ) loadLimits ( conf * config . RuntimeConfig ) {
2018-06-11 19:51:17 +00:00
a . config . RPCRateLimit = conf . RPCRateLimit
a . config . RPCMaxBurst = conf . RPCMaxBurst
2018-04-08 21:28:29 +00:00
}
2020-06-10 20:47:35 +00:00
// ReloadConfig will atomically reload all configuration, including
// all services, checks, tokens, metadata, dnsServer configs, etc.
2020-04-01 20:52:23 +00:00
// It will also reload all ongoing watches.
2020-06-10 20:47:35 +00:00
func ( a * Agent ) ReloadConfig ( ) error {
newCfg , err := a . autoConf . ReadConfig ( )
if err != nil {
return err
}
// copy over the existing node id, this cannot be
// changed while running anyways but this prevents
// breaking some existing behavior.
newCfg . NodeID = a . config . NodeID
return a . reloadConfigInternal ( newCfg )
}
// reloadConfigInternal is mainly needed for some unit tests. Instead of parsing
// the configuration using CLI flags and on disk config, this just takes a
// runtime configuration and applies it.
func ( a * Agent ) reloadConfigInternal ( newCfg * config . RuntimeConfig ) error {
2020-04-02 07:59:23 +00:00
if err := a . CheckSecurity ( newCfg ) ; err != nil {
a . logger . Error ( "Security error while reloading configuration: %#v" , err )
return err
}
2020-06-10 20:47:35 +00:00
// Change the log level and update it
if logging . ValidateLogLevel ( newCfg . LogLevel ) {
a . logger . SetLevel ( logging . LevelFromString ( newCfg . LogLevel ) )
} else {
a . logger . Warn ( "Invalid log level in new configuration" , "level" , newCfg . LogLevel )
newCfg . LogLevel = a . config . LogLevel
}
2017-06-02 12:56:49 +00:00
// Bulk update the services and checks
a . PauseSync ( )
defer a . ResumeSync ( )
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
2019-07-17 19:06:50 +00:00
// Snapshot the current state, and use that to initialize the checks when
// they are recreated.
2017-06-02 12:56:49 +00:00
snap := a . snapshotCheckState ( )
// First unload all checks, services, and metadata. This lets us begin the reload
// with a clean slate.
if err := a . unloadServices ( ) ; err != nil {
2017-06-24 19:52:41 +00:00
return fmt . Errorf ( "Failed unloading services: %s" , err )
2017-06-02 12:56:49 +00:00
}
if err := a . unloadChecks ( ) ; err != nil {
2017-06-24 19:52:41 +00:00
return fmt . Errorf ( "Failed unloading checks: %s" , err )
2017-06-02 12:56:49 +00:00
}
a . unloadMetadata ( )
2019-02-27 19:28:31 +00:00
// Reload tokens - should be done before all the other loading
// to ensure the correct tokens are available for attaching to
// the checks and service registrations.
a . loadTokens ( newCfg )
2020-02-04 20:58:56 +00:00
a . loadEnterpriseTokens ( newCfg )
2019-02-27 19:28:31 +00:00
2019-03-13 09:29:06 +00:00
if err := a . tlsConfigurator . Update ( newCfg . ToTLSUtilConfig ( ) ) ; err != nil {
return fmt . Errorf ( "Failed reloading tls configuration: %s" , err )
}
2017-06-02 12:56:49 +00:00
// Reload service/check definitions and metadata.
2020-03-09 11:59:41 +00:00
if err := a . loadServices ( newCfg , snap ) ; err != nil {
2017-06-24 19:52:41 +00:00
return fmt . Errorf ( "Failed reloading services: %s" , err )
2017-06-02 12:56:49 +00:00
}
2019-07-17 19:06:50 +00:00
if err := a . loadChecks ( newCfg , snap ) ; err != nil {
2017-06-24 19:52:41 +00:00
return fmt . Errorf ( "Failed reloading checks: %s" , err )
2017-06-02 12:56:49 +00:00
}
if err := a . loadMetadata ( newCfg ) ; err != nil {
2017-06-24 19:52:41 +00:00
return fmt . Errorf ( "Failed reloading metadata: %s" , err )
2017-06-02 12:56:49 +00:00
}
2017-06-24 19:52:41 +00:00
if err := a . reloadWatches ( newCfg ) ; err != nil {
return fmt . Errorf ( "Failed reloading watches: %v" , err )
2017-06-02 12:56:49 +00:00
}
2018-06-11 19:51:17 +00:00
a . loadLimits ( newCfg )
2020-01-31 16:19:37 +00:00
a . httpConnLimiter . SetConfig ( connlimit . Config {
MaxConnsPerClientIP : newCfg . HTTPMaxConnsPerClient ,
} )
2019-04-24 18:11:54 +00:00
for _ , s := range a . dnsServers {
if err := s . ReloadConfig ( newCfg ) ; err != nil {
return fmt . Errorf ( "Failed reloading dns config : %v" , err )
}
}
2019-04-26 18:25:03 +00:00
// this only gets used by the consulConfig function and since
// that is only ever done during init and reload here then
// an in place modification is safe as reloads cannot be
2020-01-31 16:19:37 +00:00
// concurrent due to both gaining a full lock on the stateLock
2019-04-26 18:25:03 +00:00
a . config . ConfigEntryBootstrap = newCfg . ConfigEntryBootstrap
2020-04-16 22:07:52 +00:00
err := a . reloadEnterprise ( newCfg )
if err != nil {
return err
}
2018-06-11 19:51:17 +00:00
// create the config for the rpc server/client
consulCfg , err := a . consulConfig ( )
if err != nil {
return err
}
if err := a . delegate . ReloadConfig ( consulCfg ) ; err != nil {
return err
}
2018-04-08 21:28:29 +00:00
2017-08-08 19:33:30 +00:00
// Update filtered metrics
2018-06-14 12:52:48 +00:00
metrics . UpdateFilter ( newCfg . Telemetry . AllowedPrefixes ,
newCfg . Telemetry . BlockedPrefixes )
2017-08-08 19:33:30 +00:00
2017-08-28 12:17:13 +00:00
a . State . SetDiscardCheckOutput ( newCfg . DiscardCheckOutput )
2017-10-11 00:04:52 +00:00
2017-06-24 19:52:41 +00:00
return nil
2017-06-02 12:56:49 +00:00
}
2018-04-11 08:52:51 +00:00
2019-09-26 02:55:52 +00:00
// LocalBlockingQuery performs a blocking query in a generic way against
// local agent state that has no RPC or raft to back it. It uses `hash` parameter
// instead of an `index`.
// `alwaysBlock` determines whether we block if the provided hash is empty.
// Callers like the AgentService endpoint will want to return the current result if a hash isn't provided.
// On the other hand, for cache notifications we always want to block. This avoids an empty first response.
func ( a * Agent ) LocalBlockingQuery ( alwaysBlock bool , hash string , wait time . Duration ,
fn func ( ws memdb . WatchSet ) ( string , interface { } , error ) ) ( string , interface { } , error ) {
// If we are not blocking we can skip tracking and allocating - nil WatchSet
// is still valid to call Add on and will just be a no op.
var ws memdb . WatchSet
var timeout * time . Timer
if alwaysBlock || hash != "" {
if wait == 0 {
wait = defaultQueryTime
}
if wait > 10 * time . Minute {
wait = maxQueryTime
}
// Apply a small amount of jitter to the request.
wait += lib . RandomStagger ( wait / 16 )
timeout = time . NewTimer ( wait )
}
for {
// Must reset this every loop in case the Watch set is already closed but
// hash remains same. In that case we'll need to re-block on ws.Watch()
// again.
ws = memdb . NewWatchSet ( )
curHash , curResp , err := fn ( ws )
if err != nil {
return "" , curResp , err
}
// Return immediately if there is no timeout, the hash is different or the
// Watch returns true (indicating timeout fired). Note that Watch on a nil
// WatchSet immediately returns false which would incorrectly cause this to
// loop and repeat again, however we rely on the invariant that ws == nil
// IFF timeout == nil in which case the Watch call is never invoked.
if timeout == nil || hash != curHash || ws . Watch ( timeout . C ) {
return curHash , curResp , err
}
// Watch returned false indicating a change was detected, loop and repeat
// the callback to load the new value. If agent sync is paused it means
// local state is currently being bulk-edited e.g. config reload. In this
// case it's likely that local state just got unloaded and may or may not be
// reloaded yet. Wait a short amount of time for Sync to resume to ride out
// typical config reloads.
if syncPauseCh := a . SyncPausedCh ( ) ; syncPauseCh != nil {
select {
case <- syncPauseCh :
case <- timeout . C :
}
}
}
}
2018-04-11 08:52:51 +00:00
// registerCache configures the cache and registers all the supported
// types onto the cache. This is NOT safe to call multiple times so
// care should be taken to call this exactly once after the cache
// field has been initialized.
func ( a * Agent ) registerCache ( ) {
2018-09-06 10:34:28 +00:00
// Note that you should register the _agent_ as the RPC implementation and not
// the a.delegate directly, otherwise tests that rely on overriding RPC
// routing via a.registerEndpoint will not work.
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ConnectCARootName , & cachetype . ConnectCARoot { RPC : a } )
2018-04-17 23:26:58 +00:00
2018-04-30 21:23:49 +00:00
a . cache . RegisterType ( cachetype . ConnectCALeafName , & cachetype . ConnectCALeaf {
2019-01-10 12:46:11 +00:00
RPC : a ,
Cache : a . cache ,
Datacenter : a . config . Datacenter ,
TestOverrideCAChangeInitialDelay : a . config . ConnectTestCALeafRootChangeSpread ,
2018-04-30 21:23:49 +00:00
} )
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . IntentionMatchName , & cachetype . IntentionMatch { RPC : a } )
2018-09-06 10:34:28 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogServicesName , & cachetype . CatalogServices { RPC : a } )
2018-09-06 10:34:28 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . HealthServicesName , & cachetype . HealthServices { RPC : a } )
2018-09-06 10:34:28 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . PreparedQueryName , & cachetype . PreparedQuery { RPC : a } )
2019-02-25 19:06:01 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . NodeServicesName , & cachetype . NodeServices { RPC : a } )
2019-04-23 06:39:02 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ResolvedServiceConfigName , & cachetype . ResolvedServiceConfig { RPC : a } )
2019-06-24 18:11:34 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogListServicesName , & cachetype . CatalogListServices { RPC : a } )
2019-06-24 18:11:34 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogServiceListName , & cachetype . CatalogServiceList { RPC : a } )
2020-01-24 15:04:58 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogDatacentersName , & cachetype . CatalogDatacenters { RPC : a } )
2019-06-20 19:04:39 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . InternalServiceDumpName , & cachetype . InternalServiceDump { RPC : a } )
2019-07-02 03:10:51 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CompiledDiscoveryChainName , & cachetype . CompiledDiscoveryChain { RPC : a } )
2019-07-02 00:45:42 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . GatewayServicesName , & cachetype . GatewayServices { RPC : a } )
2020-04-16 21:00:48 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ConfigEntriesName , & cachetype . ConfigEntries { RPC : a } )
2019-09-26 02:55:52 +00:00
2020-04-27 23:36:20 +00:00
a . cache . RegisterType ( cachetype . ConfigEntryName , & cachetype . ConfigEntry { RPC : a } )
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ServiceHTTPChecksName , & cachetype . ServiceHTTPChecks { Agent : a } )
2020-03-09 20:59:02 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . FederationStateListMeshGatewaysName ,
& cachetype . FederationStateListMeshGateways { RPC : a } )
2019-09-26 02:55:52 +00:00
}
2020-04-01 20:52:23 +00:00
// LocalState returns the agent's local state
2019-09-26 02:55:52 +00:00
func ( a * Agent ) LocalState ( ) * local . State {
return a . State
}
// rerouteExposedChecks will inject proxy address into check targets
// Future calls to check() will dial the proxy listener
// The agent stateLock MUST be held for this to be called
2019-12-10 02:26:41 +00:00
func ( a * Agent ) rerouteExposedChecks ( serviceID structs . ServiceID , proxyAddr string ) error {
for cid , c := range a . checkHTTPs {
2019-09-26 02:55:52 +00:00
if c . ServiceID != serviceID {
continue
}
2019-12-10 02:26:41 +00:00
port , err := a . listenerPortLocked ( serviceID , cid )
2019-09-26 02:55:52 +00:00
if err != nil {
return err
}
c . ProxyHTTP = httpInjectAddr ( c . HTTP , proxyAddr , port )
}
2019-12-10 02:26:41 +00:00
for cid , c := range a . checkGRPCs {
2019-09-26 02:55:52 +00:00
if c . ServiceID != serviceID {
continue
}
2019-12-10 02:26:41 +00:00
port , err := a . listenerPortLocked ( serviceID , cid )
2019-09-26 02:55:52 +00:00
if err != nil {
return err
}
c . ProxyGRPC = grpcInjectAddr ( c . GRPC , proxyAddr , port )
}
return nil
}
// resetExposedChecks will set Proxy addr in HTTP checks to empty string
// Future calls to check() will use the original target c.HTTP or c.GRPC
// The agent stateLock MUST be held for this to be called
2019-12-10 02:26:41 +00:00
func ( a * Agent ) resetExposedChecks ( serviceID structs . ServiceID ) {
ids := make ( [ ] structs . CheckID , 0 )
for cid , c := range a . checkHTTPs {
2019-09-26 02:55:52 +00:00
if c . ServiceID == serviceID {
c . ProxyHTTP = ""
2019-12-10 02:26:41 +00:00
ids = append ( ids , cid )
2019-09-26 02:55:52 +00:00
}
}
2019-12-10 02:26:41 +00:00
for cid , c := range a . checkGRPCs {
2019-09-26 02:55:52 +00:00
if c . ServiceID == serviceID {
c . ProxyGRPC = ""
2019-12-10 02:26:41 +00:00
ids = append ( ids , cid )
2019-09-26 02:55:52 +00:00
}
}
for _ , checkID := range ids {
delete ( a . exposedPorts , listenerPortKey ( serviceID , checkID ) )
}
}
// listenerPort allocates a port from the configured range
// The agent stateLock MUST be held when this is called
2019-12-10 02:26:41 +00:00
func ( a * Agent ) listenerPortLocked ( svcID structs . ServiceID , checkID structs . CheckID ) ( int , error ) {
2019-09-26 02:55:52 +00:00
key := listenerPortKey ( svcID , checkID )
if a . exposedPorts == nil {
a . exposedPorts = make ( map [ string ] int )
}
if p , ok := a . exposedPorts [ key ] ; ok {
return p , nil
}
allocated := make ( map [ int ] bool )
for _ , v := range a . exposedPorts {
allocated [ v ] = true
}
var port int
for i := 0 ; i < a . config . ExposeMaxPort - a . config . ExposeMinPort ; i ++ {
port = a . config . ExposeMinPort + i
if ! allocated [ port ] {
a . exposedPorts [ key ] = port
break
}
}
if port == 0 {
return 0 , fmt . Errorf ( "no ports available to expose '%s'" , checkID )
}
return port , nil
}
2019-12-10 02:26:41 +00:00
func listenerPortKey ( svcID structs . ServiceID , checkID structs . CheckID ) string {
2019-09-26 02:55:52 +00:00
return fmt . Sprintf ( "%s:%s" , svcID , checkID )
}
// grpcInjectAddr injects an ip and port into an address of the form: ip:port[/service]
func grpcInjectAddr(existing string, ip string, port int) string {
	// First substitute the port, then the host, each via the same pattern.
	withPort := grpcAddrRE.ReplaceAllString(existing, fmt.Sprintf("${1}:%d${3}", port))
	return grpcAddrRE.ReplaceAllString(withPort, fmt.Sprintf("%s${2}${3}", ip))
}
// httpInjectAddr injects a port then an IP into a URL
func httpInjectAddr(url string, ip string, port int) string {
	// Substitute the port first, then the host.
	withPort := httpAddrRE.ReplaceAllString(url, fmt.Sprintf("${1}${2}:%d${4}${5}", port))
	// Ensure that ipv6 addr is enclosed in brackets (RFC 3986)
	host := fixIPv6(ip)
	return httpAddrRE.ReplaceAllString(withPort, fmt.Sprintf("${1}%s${3}${4}${5}", host))
}
// fixIPv6 wraps a bare IPv6 literal in square brackets as required by
// RFC 3986. Inputs with fewer than two colons (IPv4 addresses, hostnames,
// host:port pairs) are returned unchanged, as are already-bracketed values.
func fixIPv6(address string) string {
	if strings.Count(address, ":") < 2 {
		return address
	}
	out := address
	if !strings.HasSuffix(out, "]") {
		out += "]"
	}
	if !strings.HasPrefix(out, "[") {
		out = "[" + out
	}
	return out
}
// defaultIfEmpty returns the value if not empty otherwise the default value.
func defaultIfEmpty(val, defaultVal string) string {
	if val == "" {
		return defaultVal
	}
	return val
}