2013-12-20 01:14:46 +00:00
package agent
2013-12-20 23:33:13 +00:00
import (
2017-05-19 09:53:41 +00:00
"context"
2017-05-24 13:22:56 +00:00
"crypto/tls"
2014-11-24 08:36:03 +00:00
"encoding/json"
2013-12-20 23:33:13 +00:00
"fmt"
2013-12-21 00:39:32 +00:00
"io"
2015-06-04 21:33:30 +00:00
"io/ioutil"
2014-01-01 00:45:13 +00:00
"net"
2017-05-19 09:53:41 +00:00
"net/http"
2013-12-21 00:39:32 +00:00
"os"
2014-09-06 00:22:33 +00:00
"path/filepath"
2022-03-31 19:11:49 +00:00
"reflect"
2019-09-26 02:55:52 +00:00
"regexp"
2014-02-24 00:42:39 +00:00
"strconv"
2016-12-02 05:35:38 +00:00
"strings"
2013-12-21 00:39:32 +00:00
"sync"
2015-06-05 23:17:07 +00:00
"time"
2014-06-16 21:36:12 +00:00
2020-09-30 21:38:13 +00:00
"github.com/armon/go-metrics"
2020-12-09 14:16:53 +00:00
"github.com/armon/go-metrics/prometheus"
2020-01-31 16:19:37 +00:00
"github.com/hashicorp/go-connlimit"
2020-01-28 23:50:41 +00:00
"github.com/hashicorp/go-hclog"
2019-10-04 21:10:02 +00:00
"github.com/hashicorp/go-memdb"
2020-09-30 21:38:13 +00:00
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"golang.org/x/net/http2"
2018-10-03 19:37:53 +00:00
"google.golang.org/grpc"
2017-08-23 14:52:48 +00:00
"github.com/hashicorp/consul/acl"
2022-06-17 09:24:43 +00:00
"github.com/hashicorp/consul/acl/resolver"
2017-08-28 12:17:09 +00:00
"github.com/hashicorp/consul/agent/ae"
2018-04-11 08:52:51 +00:00
"github.com/hashicorp/consul/agent/cache"
2019-02-25 19:06:01 +00:00
cachetype "github.com/hashicorp/consul/agent/cache-types"
2017-10-25 09:18:07 +00:00
"github.com/hashicorp/consul/agent/checks"
2017-09-25 18:40:42 +00:00
"github.com/hashicorp/consul/agent/config"
	// (misplaced commit-message/refactor-script text removed from import block;
	// see VCS history for the package-layout refactor details)
2017-06-09 22:28:28 +00:00
"github.com/hashicorp/consul/agent/consul"
2020-09-30 21:38:13 +00:00
"github.com/hashicorp/consul/agent/dns"
2022-07-13 15:33:48 +00:00
external "github.com/hashicorp/consul/agent/grpc-external"
2017-08-28 12:17:12 +00:00
"github.com/hashicorp/consul/agent/local"
2018-10-03 19:37:53 +00:00
"github.com/hashicorp/consul/agent/proxycfg"
2022-06-01 15:18:06 +00:00
proxycfgglue "github.com/hashicorp/consul/agent/proxycfg-glue"
2022-05-27 11:38:52 +00:00
catalogproxycfg "github.com/hashicorp/consul/agent/proxycfg-sources/catalog"
localproxycfg "github.com/hashicorp/consul/agent/proxycfg-sources/local"
2020-09-30 21:38:13 +00:00
"github.com/hashicorp/consul/agent/rpcclient/health"
2017-07-06 10:34:00 +00:00
"github.com/hashicorp/consul/agent/structs"
2017-06-21 04:43:55 +00:00
"github.com/hashicorp/consul/agent/systemd"
2020-09-30 21:38:13 +00:00
"github.com/hashicorp/consul/agent/token"
2018-10-03 19:37:53 +00:00
"github.com/hashicorp/consul/agent/xds"
2017-04-19 23:00:11 +00:00
"github.com/hashicorp/consul/api"
2019-04-26 16:33:01 +00:00
"github.com/hashicorp/consul/api/watch"
2017-05-15 20:10:36 +00:00
"github.com/hashicorp/consul/ipaddr"
2016-01-29 19:42:34 +00:00
"github.com/hashicorp/consul/lib"
2018-05-03 20:56:42 +00:00
"github.com/hashicorp/consul/lib/file"
2020-12-05 00:06:47 +00:00
"github.com/hashicorp/consul/lib/mutex"
2021-05-20 14:07:23 +00:00
"github.com/hashicorp/consul/lib/routine"
2020-01-28 23:50:41 +00:00
"github.com/hashicorp/consul/logging"
2022-06-01 15:18:06 +00:00
"github.com/hashicorp/consul/proto/pbpeering"
2019-02-26 15:52:07 +00:00
"github.com/hashicorp/consul/tlsutil"
2016-06-06 20:19:31 +00:00
"github.com/hashicorp/consul/types"
2013-12-20 23:33:13 +00:00
)
2014-11-24 08:36:03 +00:00
const (
// Path to save agent service definitions
2019-09-24 15:04:48 +00:00
servicesDir = "services"
serviceConfigDir = "services/configs"
2014-11-24 08:36:03 +00:00
2018-05-14 20:55:24 +00:00
// Path to save agent proxy definitions
proxyDir = "proxies"
2014-11-24 08:36:03 +00:00
// Path to save local agent checks
2015-06-05 23:17:07 +00:00
checksDir = "checks"
checkStateDir = "checks/state"
2015-01-16 20:39:15 +00:00
2015-01-21 22:45:09 +00:00
// Default reasons for node/service maintenance mode
defaultNodeMaintReason = "Maintenance mode is enabled for this node, " +
"but no reason was provided. This is a default message."
defaultServiceMaintReason = "Maintenance mode is enabled for this " +
"service, but no reason was provided. This is a default message."
2019-06-27 20:22:07 +00:00
// ID of the roots watch
rootsWatchID = "roots"
// ID of the leaf watch
leafWatchID = "leaf"
2019-09-26 02:55:52 +00:00
// maxQueryTime is used to bound the limit of a blocking query
maxQueryTime = 600 * time . Second
// defaultQueryTime is the amount of time we block waiting for a change
// if no time is specified. Previously we would wait the maxQueryTime.
defaultQueryTime = 300 * time . Second
)
var (
	// httpAddrRE splits an HTTP(S) address into scheme, host (bracketed IPv6
	// or bare hostname/IPv4), optional :port, path, and optional query.
	// NOTE: the pattern is anchored; no leading/trailing whitespace is allowed.
	httpAddrRE = regexp.MustCompile(`^(http[s]?://)(\[.*?\]|\[?[\w\-\.]+)(:\d+)?([^?]*)(\?.*)?$`)

	// grpcAddrRE splits a gRPC address into the host part, the :port, and any
	// trailing component.
	grpcAddrRE = regexp.MustCompile("(.*)((?::)(?:[0-9]+))(.*)$")
)
2018-10-11 12:22:11 +00:00
// configSource distinguishes whether a registration originated on the local
// agent (e.g. config file / local API) or from a remote source.
type configSource int

const (
	ConfigSourceLocal configSource = iota
	ConfigSourceRemote
)

var configSourceToName = map[configSource]string{
	ConfigSourceLocal:  "local",
	ConfigSourceRemote: "remote",
}

var configSourceFromName = map[string]configSource{
	"local":  ConfigSourceLocal,
	"remote": ConfigSourceRemote,
	// If the value is not found in the persisted config file, then use the
	// former default.
	"": ConfigSourceLocal,
}

// String returns the human-readable name for the configSource, or the empty
// string for an unknown value.
func (s configSource) String() string {
	return configSourceToName[s]
}

// ConfigSourceFromName will unmarshal the string form of a configSource.
// The second return value reports whether the name was recognized.
func ConfigSourceFromName(name string) (configSource, bool) {
	src, known := configSourceFromName[name]
	return src, known
}
2017-06-15 09:42:07 +00:00
// delegate defines the interface shared by both
2017-05-15 14:05:17 +00:00
// consul.Client and consul.Server.
2017-06-15 09:42:07 +00:00
type delegate interface {
2021-10-26 20:08:55 +00:00
// Leave is used to prepare for a graceful shutdown.
2017-05-15 14:05:17 +00:00
Leave ( ) error
2021-10-26 20:08:55 +00:00
// AgentLocalMember is used to retrieve the LAN member for the local node.
AgentLocalMember ( ) serf . Member
// LANMembersInAgentPartition returns the LAN members for this agent's
// canonical serf pool. For clients this is the only pool that exists. For
// servers it's the pool in the default segment and the default partition.
LANMembersInAgentPartition ( ) [ ] serf . Member
// LANMembers returns the LAN members for one of:
//
// - the requested partition
// - the requested segment
// - all segments
//
// This is limited to segments and partitions that the node is a member of.
LANMembers ( f consul . LANMemberFilter ) ( [ ] serf . Member , error )
2021-11-15 15:51:14 +00:00
// GetLANCoordinate returns the coordinate of the node in the LAN gossip
// pool.
//
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
//
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
//
// NOTE: servers do not emit coordinates for partitioned gossip pools they
// are ancillary members of.
//
// NOTE: This assumes coordinates are enabled, so check that before calling.
2021-10-26 20:08:55 +00:00
GetLANCoordinate ( ) ( lib . CoordinateSet , error )
// JoinLAN is used to have Consul join the inner-DC pool The target address
// should be another node inside the DC listening on the Serf LAN address
2022-04-05 21:10:06 +00:00
JoinLAN ( addrs [ ] string , entMeta * acl . EnterpriseMeta ) ( n int , err error )
2021-10-26 20:08:55 +00:00
// RemoveFailedNode is used to remove a failed node from the cluster.
2022-04-05 21:10:06 +00:00
RemoveFailedNode ( node string , prune bool , entMeta * acl . EnterpriseMeta ) error
2020-11-17 22:10:21 +00:00
// ResolveTokenAndDefaultMeta returns an acl.Authorizer which authorizes
// actions based on the permissions granted to the token.
// If either entMeta or authzContext are non-nil they will be populated with the
2021-10-26 19:20:57 +00:00
// default partition and namespace from the token.
2022-06-17 09:24:43 +00:00
ResolveTokenAndDefaultMeta ( token string , entMeta * acl . EnterpriseMeta , authzContext * acl . AuthorizerContext ) ( resolver . Result , error )
2020-11-17 22:10:21 +00:00
2017-05-15 14:05:17 +00:00
RPC ( method string , args interface { } , reply interface { } ) error
2017-06-15 09:50:28 +00:00
SnapshotRPC ( args * structs . SnapshotRequest , in io . Reader , out io . Writer , replyFn structs . SnapshotReplyFn ) error
2017-05-15 14:05:17 +00:00
Shutdown ( ) error
Stats ( ) map [ string ] map [ string ] string
2020-09-16 17:28:03 +00:00
ReloadConfig ( config consul . ReloadableConfig ) error
2018-05-24 14:36:42 +00:00
enterpriseDelegate
2017-05-15 14:05:17 +00:00
}
2015-02-09 17:22:51 +00:00
2017-06-21 04:43:55 +00:00
// notifier is called after a successful JoinLAN to signal readiness
// (e.g. to a process supervisor such as systemd).
type notifier interface {
	Notify(string) error
}
2020-01-27 19:54:32 +00:00
// Agent is the long running process that is run on every machine.
2017-05-15 14:05:17 +00:00
// It exposes an RPC interface that is used by the CLI to control the
// agent. The agent runs the query interfaces like HTTP, DNS, and RPC.
// However, it can run in either a client, or server mode. In server
// mode, it runs a full Consul server. In client-only mode, it only forwards
// requests to other Consul servers.
2013-12-20 01:14:46 +00:00
type Agent struct {
2020-09-14 22:31:07 +00:00
// TODO: remove fields that are already in BaseDeps
baseDeps BaseDeps
2020-06-10 20:47:35 +00:00
2017-05-23 17:04:06 +00:00
// config is the agent configuration.
2017-09-25 18:40:42 +00:00
config * config . RuntimeConfig
2013-12-20 23:33:13 +00:00
2013-12-21 00:39:32 +00:00
// Used for writing our logs
2020-01-28 23:50:41 +00:00
logger hclog . InterceptLogger
2013-12-21 00:39:32 +00:00
2017-05-15 14:05:17 +00:00
// delegate is either a *consul.Server or *consul.Client
// depending on the configuration
2017-06-15 09:42:07 +00:00
delegate delegate
2013-12-21 00:39:32 +00:00
2022-07-13 15:33:48 +00:00
// externalGRPCServer is the gRPC server exposed on the dedicated gRPC port (as
2022-03-22 12:40:24 +00:00
// opposed to the multiplexed "server" port).
2022-07-13 15:33:48 +00:00
externalGRPCServer * grpc . Server
2022-03-22 12:40:24 +00:00
2014-01-16 01:14:50 +00:00
// state stores a local representation of the node,
// services and checks. Used for anti-entropy.
2017-08-28 12:17:13 +00:00
State * local . State
2014-01-21 20:05:56 +00:00
2017-08-28 12:17:09 +00:00
// sync manages the synchronization of the local
// and the remote state.
sync * ae . StateSyncer
2018-09-27 14:00:51 +00:00
// syncMu and syncCh are used to coordinate agent endpoints that are blocking
// on local state during a config reload.
syncMu sync . Mutex
syncCh chan struct { }
2018-04-11 08:52:51 +00:00
// cache is the in-memory cache for data the Agent requests.
cache * cache . Cache
2016-08-16 07:05:55 +00:00
// checkReapAfter maps the check ID to a timeout after which we should
// reap its associated service
2019-12-10 02:26:41 +00:00
checkReapAfter map [ structs . CheckID ] time . Duration
2016-08-16 07:05:55 +00:00
2014-01-21 20:05:56 +00:00
// checkMonitors maps the check ID to an associated monitor
2019-12-10 02:26:41 +00:00
checkMonitors map [ structs . CheckID ] * checks . CheckMonitor
2015-01-09 22:43:24 +00:00
// checkHTTPs maps the check ID to an associated HTTP check
2019-12-10 02:26:41 +00:00
checkHTTPs map [ structs . CheckID ] * checks . CheckHTTP
2015-01-09 22:43:24 +00:00
2021-04-09 19:12:10 +00:00
// checkH2PINGs maps the check ID to an associated HTTP2 PING check
checkH2PINGs map [ structs . CheckID ] * checks . CheckH2PING
2015-07-23 11:45:08 +00:00
// checkTCPs maps the check ID to an associated TCP check
2019-12-10 02:26:41 +00:00
checkTCPs map [ structs . CheckID ] * checks . CheckTCP
2015-07-23 11:45:08 +00:00
2022-06-06 19:13:19 +00:00
// checkUDPs maps the check ID to an associated UDP check
checkUDPs map [ structs . CheckID ] * checks . CheckUDP
2017-12-27 04:35:22 +00:00
// checkGRPCs maps the check ID to an associated GRPC check
2019-12-10 02:26:41 +00:00
checkGRPCs map [ structs . CheckID ] * checks . CheckGRPC
2017-12-27 04:35:22 +00:00
2015-01-09 22:43:24 +00:00
// checkTTLs maps the check ID to an associated check TTL
2019-12-10 02:26:41 +00:00
checkTTLs map [ structs . CheckID ] * checks . CheckTTL
2015-01-09 22:43:24 +00:00
2015-10-22 22:29:13 +00:00
// checkDockers maps the check ID to an associated Docker Exec based check
2019-12-10 02:26:41 +00:00
checkDockers map [ structs . CheckID ] * checks . CheckDocker
2015-10-22 22:29:13 +00:00
2018-06-30 13:38:56 +00:00
// checkAliases maps the check ID to an associated Alias checks
2019-12-10 02:26:41 +00:00
checkAliases map [ structs . CheckID ] * checks . CheckAlias
2018-06-30 13:38:56 +00:00
2019-09-26 02:55:52 +00:00
// exposedPorts tracks listener ports for checks exposed through a proxy
exposedPorts map [ string ] int
2019-03-04 14:34:05 +00:00
// stateLock protects the agent state
2020-12-05 00:06:47 +00:00
stateLock * mutex . Mutex
2014-01-21 20:05:56 +00:00
2017-07-12 14:01:42 +00:00
// dockerClient is the client for performing docker health checks.
2017-10-25 09:18:07 +00:00
dockerClient * checks . DockerClient
2017-07-12 14:01:42 +00:00
2014-08-27 23:49:12 +00:00
// eventCh is used to receive user events
eventCh chan serf . UserEvent
2014-08-28 00:01:10 +00:00
// eventBuf stores the most recent events in a ring buffer
// using eventIndex as the next index to insert into. This
// is guarded by eventLock. When an insert happens, the
// eventNotify group is notified.
2014-08-28 17:56:30 +00:00
eventBuf [ ] * UserEvent
2014-08-28 00:01:10 +00:00
eventIndex int
eventLock sync . RWMutex
2017-06-15 16:45:30 +00:00
eventNotify NotifyGroup
2014-08-28 00:01:10 +00:00
2014-01-21 20:05:56 +00:00
shutdown bool
shutdownCh chan struct { }
shutdownLock sync . Mutex
2015-11-12 17:19:33 +00:00
2017-06-21 04:43:55 +00:00
// joinLANNotifier is called after a successful JoinLAN.
joinLANNotifier notifier
2017-06-02 09:55:29 +00:00
// retryJoinCh transports errors from the retry join
// attempts.
retryJoinCh chan error
2017-06-16 07:54:09 +00:00
// endpoints maps unique RPC endpoint names to common ones
// to allow overriding of RPC handlers since the golang
// net/rpc server does not allow this.
2017-05-22 22:00:14 +00:00
endpoints map [ string ] string
endpointsLock sync . RWMutex
2017-05-19 09:53:41 +00:00
// dnsServer provides the DNS API
dnsServers [ ] * DNSServer
2020-07-02 17:31:47 +00:00
// apiServers listening for connections. If any of these server goroutines
// fail, the agent will be shutdown.
apiServers * apiServers
2017-05-19 09:53:41 +00:00
2020-09-23 11:37:33 +00:00
// httpHandlers provides direct access to (one of) the HTTPHandlers started by
// this agent. This is used in tests to test HTTP endpoints without overhead
// of TCP connections etc.
//
// TODO: this is a temporary re-introduction after we removed a list of
// HTTPServers in favour of apiServers abstraction. Now that HTTPHandlers is
// stateful and has config reloading though it's not OK to just use a
// different instance of handlers in tests to the ones that the agent is wired
// up to since then config reloads won't actually affect the handlers under
// test while plumbing the external handlers in the TestAgent through bypasses
// testing that the agent itself is actually reloading the state correctly.
// Once we move `apiServers` to be a passed-in dependency for NewAgent, we
// should be able to remove this and have the Test Agent create the
// HTTPHandlers and pass them in removing the need to pull them back out
// again.
httpHandlers * HTTPHandlers
2017-05-19 09:53:41 +00:00
// wgServers is the wait group for all HTTP and DNS servers
2020-07-02 17:31:47 +00:00
// TODO: remove once dnsServers are handled by apiServers
2017-05-19 09:53:41 +00:00
wgServers sync . WaitGroup
2017-06-24 19:52:41 +00:00
// watchPlans tracks all the currently-running watch plans for the
// agent.
watchPlans [ ] * watch . Plan
2017-07-26 18:03:43 +00:00
// tokens holds ACL tokens initially from the configuration, but can
// be updated at runtime, so should always be used instead of going to
// the configuration directly.
tokens * token . Store
2018-05-02 18:38:18 +00:00
2018-10-03 19:37:53 +00:00
// proxyConfig is the manager for proxy service (Kind = connect-proxy)
// configuration state. This ensures all state needed by a proxy registration
// is maintained in cache and handles pushing updates to that state into XDS
2019-08-09 19:19:30 +00:00
// server to be pushed out to Envoy.
2018-10-03 19:37:53 +00:00
proxyConfig * proxycfg . Manager
2019-04-24 13:46:30 +00:00
// serviceManager is the manager for combining local service registrations with
// the centrally configured proxy/service defaults.
2019-04-18 04:35:19 +00:00
serviceManager * ServiceManager
2019-02-27 09:14:59 +00:00
// tlsConfigurator is the central instance to provide a *tls.Config
// based on the current consul configuration.
2019-02-26 15:52:07 +00:00
tlsConfigurator * tlsutil . Configurator
2019-02-27 19:28:31 +00:00
2020-01-31 16:19:37 +00:00
// httpConnLimiter is used to limit connections to the HTTP server by client
// IP.
httpConnLimiter connlimit . Limiter
2020-04-17 20:27:39 +00:00
2020-09-23 11:37:33 +00:00
// configReloaders are subcomponents that need to be notified on a reload so
// they can update their internal state.
configReloaders [ ] ConfigReloader
2020-09-30 21:38:13 +00:00
// TODO: pass directly to HTTPHandlers and DNSServer once those are passed
// into Agent, which will allow us to remove this field.
rpcClientHealth * health . Client
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
rpcClientPeering pbpeering . PeeringServiceClient
2021-05-20 14:07:23 +00:00
// routineManager is responsible for managing longer running go routines
// run by the Agent
routineManager * routine . Manager
2022-04-04 15:31:39 +00:00
// configFileWatcher is the watcher responsible to report events when a config file
2022-03-31 19:11:49 +00:00
// changed
2022-04-04 15:31:39 +00:00
configFileWatcher config . Watcher
2022-03-31 19:11:49 +00:00
2022-03-22 12:40:24 +00:00
// xdsServer serves the XDS protocol for configuring Envoy proxies.
xdsServer * xds . Server
2020-09-23 11:37:33 +00:00
// enterpriseAgent embeds fields that we only access in consul-enterprise builds
2020-04-17 20:27:39 +00:00
enterpriseAgent
2013-12-20 01:14:46 +00:00
}
2020-06-10 20:47:35 +00:00
// New process the desired options and creates a new Agent.
// This process will
// * parse the config given the config Flags
// * setup logging
// * using predefined logger given in an option
// OR
// * initialize a new logger from the configuration
// including setting up gRPC logging
// * initialize telemetry
// * create a TLS Configurator
// * build a shared connection pool
// * create the ServiceManager
// * setup the NodeID if one isn't provided in the configuration
// * create the AutoConfig object for future use in fully
// resolving the configuration
2020-08-08 01:08:43 +00:00
func New ( bd BaseDeps ) ( * Agent , error ) {
2019-06-27 20:22:07 +00:00
a := Agent {
2020-06-19 19:16:00 +00:00
checkReapAfter : make ( map [ structs . CheckID ] time . Duration ) ,
checkMonitors : make ( map [ structs . CheckID ] * checks . CheckMonitor ) ,
checkTTLs : make ( map [ structs . CheckID ] * checks . CheckTTL ) ,
checkHTTPs : make ( map [ structs . CheckID ] * checks . CheckHTTP ) ,
2021-04-09 19:12:10 +00:00
checkH2PINGs : make ( map [ structs . CheckID ] * checks . CheckH2PING ) ,
2020-06-19 19:16:00 +00:00
checkTCPs : make ( map [ structs . CheckID ] * checks . CheckTCP ) ,
2022-06-06 19:13:19 +00:00
checkUDPs : make ( map [ structs . CheckID ] * checks . CheckUDP ) ,
2020-06-19 19:16:00 +00:00
checkGRPCs : make ( map [ structs . CheckID ] * checks . CheckGRPC ) ,
checkDockers : make ( map [ structs . CheckID ] * checks . CheckDocker ) ,
checkAliases : make ( map [ structs . CheckID ] * checks . CheckAlias ) ,
eventCh : make ( chan serf . UserEvent , 1024 ) ,
eventBuf : make ( [ ] * UserEvent , 256 ) ,
joinLANNotifier : & systemd . Notifier { } ,
retryJoinCh : make ( chan error ) ,
shutdownCh : make ( chan struct { } ) ,
endpoints : make ( map [ string ] string ) ,
2020-12-05 00:06:47 +00:00
stateLock : mutex . New ( ) ,
2020-06-10 20:47:35 +00:00
2020-09-14 22:31:07 +00:00
baseDeps : bd ,
2020-08-08 01:08:43 +00:00
tokens : bd . Tokens ,
logger : bd . Logger ,
tlsConfigurator : bd . TLSConfigurator ,
config : bd . RuntimeConfig ,
cache : bd . Cache ,
2021-05-20 14:07:23 +00:00
routineManager : routine . NewManager ( bd . Logger ) ,
2019-06-27 20:22:07 +00:00
}
2020-06-10 20:47:35 +00:00
2021-04-20 22:14:46 +00:00
// TODO: create rpcClientHealth in BaseDeps once NetRPC is available without Agent
conn , err := bd . GRPCConnPool . ClientConn ( bd . RuntimeConfig . Datacenter )
if err != nil {
return nil , err
2020-10-05 21:31:35 +00:00
}
2021-04-20 22:14:46 +00:00
2020-10-26 15:55:49 +00:00
a . rpcClientHealth = & health . Client {
2021-04-20 22:14:46 +00:00
Cache : bd . Cache ,
NetRPC : & a ,
CacheName : cachetype . HealthServicesName ,
ViewStore : bd . ViewStore ,
MaterializerDeps : health . MaterializerDeps {
2021-04-22 17:40:12 +00:00
Conn : conn ,
2021-04-20 22:14:46 +00:00
Logger : bd . Logger . Named ( "rpcclient.health" ) ,
} ,
2021-06-28 20:48:10 +00:00
UseStreamingBackend : a . config . UseStreamingBackend ,
2021-07-27 21:55:00 +00:00
QueryOptionDefaults : config . ApplyDefaultQueryOptions ( a . config ) ,
2020-10-26 15:55:49 +00:00
}
2020-09-30 21:38:13 +00:00
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
a . rpcClientPeering = pbpeering . NewPeeringServiceClient ( conn )
2019-06-27 20:22:07 +00:00
a . serviceManager = NewServiceManager ( & a )
2017-06-30 21:56:05 +00:00
2020-07-28 19:31:48 +00:00
// We used to do this in the Start method. However it doesn't need to go
// there any longer. Originally it did because we passed the agent
// delegate to some of the cache registrations. Now we just
// pass the agent itself so its safe to move here.
a . registerCache ( )
2020-08-17 23:30:25 +00:00
// TODO: why do we ignore failure to load persisted tokens?
_ = a . tokens . Load ( bd . RuntimeConfig . ACLTokens , a . logger )
2020-08-31 17:12:17 +00:00
2020-07-02 17:31:47 +00:00
// TODO: pass in a fully populated apiServers into Agent.New
a . apiServers = NewAPIServers ( a . logger )
2022-03-31 19:11:49 +00:00
for _ , f := range [ ] struct {
Cfg tlsutil . ProtocolConfig
} {
{ a . baseDeps . RuntimeConfig . TLS . InternalRPC } ,
{ a . baseDeps . RuntimeConfig . TLS . GRPC } ,
{ a . baseDeps . RuntimeConfig . TLS . HTTPS } ,
} {
if f . Cfg . KeyFile != "" {
a . baseDeps . WatchedFiles = append ( a . baseDeps . WatchedFiles , f . Cfg . KeyFile )
}
if f . Cfg . CertFile != "" {
a . baseDeps . WatchedFiles = append ( a . baseDeps . WatchedFiles , f . Cfg . CertFile )
}
}
2022-04-04 15:31:39 +00:00
if a . baseDeps . RuntimeConfig . AutoReloadConfig && len ( a . baseDeps . WatchedFiles ) > 0 {
w , err := config . NewRateLimitedFileWatcher ( a . baseDeps . WatchedFiles , a . baseDeps . Logger , a . baseDeps . RuntimeConfig . AutoReloadConfigCoalesceInterval )
if err != nil {
return nil , err
}
a . configFileWatcher = w
}
2022-03-31 19:11:49 +00:00
2019-06-27 20:22:07 +00:00
return & a , nil
2017-05-19 09:53:41 +00:00
}
2016-12-02 05:35:38 +00:00
2020-06-10 20:47:35 +00:00
// GetConfig retrieves the agent's config under the state lock, since the
// config pointer may be swapped during a reload.
// TODO make export the config field and get rid of this method
// This is here for now to simplify the work I am doing and make
// reviewing the final PR easier.
func (a *Agent) GetConfig() *config.RuntimeConfig {
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	return a.config
}
2020-01-27 19:54:32 +00:00
// LocalConfig takes a config.RuntimeConfig and maps the fields to a local.Config
2017-08-28 12:17:13 +00:00
func LocalConfig ( cfg * config . RuntimeConfig ) local . Config {
lc := local . Config {
AdvertiseAddr : cfg . AdvertiseAddrLAN . String ( ) ,
CheckUpdateInterval : cfg . CheckUpdateInterval ,
Datacenter : cfg . Datacenter ,
DiscardCheckOutput : cfg . DiscardCheckOutput ,
NodeID : cfg . NodeID ,
NodeName : cfg . NodeName ,
2021-08-19 20:09:42 +00:00
Partition : cfg . PartitionOrDefault ( ) ,
2017-08-28 12:17:13 +00:00
TaggedAddresses : map [ string ] string { } ,
}
for k , v := range cfg . TaggedAddresses {
lc . TaggedAddresses [ k ] = v
}
return lc
}
2020-01-27 19:54:32 +00:00
// Start verifies its configuration and runs an agent's various subprocesses.
2020-06-19 19:16:00 +00:00
func ( a * Agent ) Start ( ctx context . Context ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
2020-06-10 20:47:35 +00:00
// This needs to be done early on as it will potentially alter the configuration
// and then how other bits are brought up
2020-09-14 22:31:07 +00:00
c , err := a . baseDeps . AutoConfig . InitialConfiguration ( ctx )
2020-06-10 20:47:35 +00:00
if err != nil {
return err
}
// copy over the existing node id, this cannot be
// changed while running anyways but this prevents
// breaking some existing behavior. then overwrite
// the configuration
c . NodeID = a . config . NodeID
a . config = c
2022-03-18 10:46:58 +00:00
if err := a . tlsConfigurator . Update ( a . config . TLS ) ; err != nil {
2020-06-10 20:47:35 +00:00
return fmt . Errorf ( "Failed to load TLS configurations after applying auto-config settings: %w" , err )
}
2017-05-19 09:53:41 +00:00
2022-03-22 12:40:24 +00:00
// This needs to happen after the initial auto-config is loaded, because TLS
// can only be configured on the gRPC server at the point of creation.
2022-07-13 15:33:48 +00:00
a . buildExternalGRPCServer ( )
2022-03-22 12:40:24 +00:00
2021-05-17 20:01:32 +00:00
if err := a . startLicenseManager ( ctx ) ; err != nil {
2021-05-11 14:50:03 +00:00
return err
}
2017-06-29 12:35:55 +00:00
// create the local state
2017-08-30 10:25:49 +00:00
a . State = local . NewState ( LocalConfig ( c ) , a . logger , a . tokens )
2017-08-28 12:17:09 +00:00
// create the state synchronization manager which performs
// regular and on-demand state synchronizations (anti-entropy).
2017-10-19 09:20:24 +00:00
a . sync = ae . NewStateSyncer ( a . State , c . AEInterval , a . shutdownCh , a . logger )
2017-06-29 12:35:55 +00:00
// create the config for the rpc server/client
2020-07-29 17:49:52 +00:00
consulCfg , err := newConsulConfig ( a . config , a . logger )
2017-06-29 12:35:55 +00:00
if err != nil {
return err
}
2020-07-29 17:49:52 +00:00
// Setup the user event callback
consulCfg . UserEventHandler = func ( e serf . UserEvent ) {
select {
case a . eventCh <- e :
case <- a . shutdownCh :
}
}
2017-08-28 12:17:09 +00:00
// ServerUp is used to inform that a new consul server is now
// up. This can be used to speed up the sync process if we are blocking
// waiting to discover a consul server
2017-08-30 10:25:49 +00:00
consulCfg . ServerUp = a . sync . SyncFull . Trigger
2017-06-29 12:35:55 +00:00
2020-04-16 22:07:52 +00:00
err = a . initEnterprise ( consulCfg )
if err != nil {
return fmt . Errorf ( "failed to start Consul enterprise component: %v" , err )
}
2019-12-06 20:35:58 +00:00
2016-08-16 07:05:55 +00:00
// Setup either the client or the server.
2017-09-25 18:40:42 +00:00
if c . ServerMode {
2022-07-13 15:33:48 +00:00
server , err := consul . NewServer ( consulCfg , a . baseDeps . Deps , a . externalGRPCServer )
2017-05-19 09:53:41 +00:00
if err != nil {
2017-06-29 12:35:55 +00:00
return fmt . Errorf ( "Failed to start Consul server: %v" , err )
2017-05-19 09:53:41 +00:00
}
a . delegate = server
2013-12-20 23:33:13 +00:00
} else {
2020-09-14 22:31:07 +00:00
client , err := consul . NewClient ( consulCfg , a . baseDeps . Deps )
2017-05-19 09:53:41 +00:00
if err != nil {
2017-06-29 12:35:55 +00:00
return fmt . Errorf ( "Failed to start Consul client: %v" , err )
2017-05-19 09:53:41 +00:00
}
a . delegate = client
2013-12-20 23:33:13 +00:00
}
2021-10-26 20:08:55 +00:00
// The staggering of the state syncing depends on the cluster size.
//
// NOTE: we will use the agent's canonical serf pool for this since that's
// similarly scoped with the state store side of anti-entropy.
a . sync . ClusterSize = func ( ) int { return len ( a . delegate . LANMembersInAgentPartition ( ) ) }
2017-08-30 10:25:49 +00:00
// link the state with the consul server/client and the state syncer
// via callbacks. After several attempts this was easier than using
// channels since the event notification needs to be non-blocking
// and that should be hidden in the state syncer implementation.
a . State . Delegate = a . delegate
a . State . TriggerSyncChanges = a . sync . SyncChanges . Trigger
2020-09-14 22:31:07 +00:00
if err := a . baseDeps . AutoConfig . Start ( & lib . StopChannelContext { StopCh : a . shutdownCh } ) ; err != nil {
2020-07-28 19:31:48 +00:00
return fmt . Errorf ( "AutoConf failed to start certificate monitor: %w" , err )
}
2019-09-24 15:04:48 +00:00
2017-01-05 22:10:26 +00:00
// Load checks/services/metadata.
2020-11-30 22:01:37 +00:00
emptyCheckSnapshot := map [ structs . CheckID ] * structs . HealthCheck { }
if err := a . loadServices ( c , emptyCheckSnapshot ) ; err != nil {
2017-05-19 09:53:41 +00:00
return err
2014-11-24 08:36:03 +00:00
}
2019-07-17 19:06:50 +00:00
if err := a . loadChecks ( c , nil ) ; err != nil {
2017-05-19 09:53:41 +00:00
return err
2014-11-24 08:36:03 +00:00
}
2017-05-19 09:53:41 +00:00
if err := a . loadMetadata ( c ) ; err != nil {
return err
2017-01-05 22:10:26 +00:00
}
2014-11-24 08:36:03 +00:00
2020-08-27 17:20:58 +00:00
var intentionDefaultAllow bool
2021-08-06 22:59:05 +00:00
switch a . config . ACLResolverSettings . ACLDefaultPolicy {
2020-08-27 17:20:58 +00:00
case "allow" :
intentionDefaultAllow = true
case "deny" :
intentionDefaultAllow = false
default :
2021-08-06 22:59:05 +00:00
return fmt . Errorf ( "unexpected ACL default policy value of %q" , a . config . ACLResolverSettings . ACLDefaultPolicy )
2020-08-27 17:20:58 +00:00
}
2021-04-20 22:14:46 +00:00
go a . baseDeps . ViewStore . Run ( & lib . StopChannelContext { StopCh : a . shutdownCh } )
2018-10-03 19:37:53 +00:00
// Start the proxy config manager.
a . proxyConfig , err = proxycfg . NewManager ( proxycfg . ManagerConfig {
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
DataSources : a . proxyDataSources ( ) ,
2022-06-01 15:18:06 +00:00
Logger : a . logger . Named ( logging . ProxyConfig ) ,
2018-10-03 19:37:53 +00:00
Source : & structs . QuerySource {
2021-08-19 20:09:42 +00:00
Datacenter : a . config . Datacenter ,
Segment : a . config . SegmentName ,
NodePartition : a . config . PartitionOrEmpty ( ) ,
2018-10-03 19:37:53 +00:00
} ,
2020-04-27 23:36:20 +00:00
DNSConfig : proxycfg . DNSConfig {
Domain : a . config . DNSDomain ,
AltDomain : a . config . DNSAltDomain ,
} ,
2020-08-27 17:20:58 +00:00
TLSConfigurator : a . tlsConfigurator ,
IntentionDefaultAllow : intentionDefaultAllow ,
2018-10-03 19:37:53 +00:00
} )
if err != nil {
return err
}
2022-05-27 11:38:52 +00:00
go localproxycfg . Sync (
& lib . StopChannelContext { StopCh : a . shutdownCh } ,
localproxycfg . SyncConfig {
Manager : a . proxyConfig ,
State : a . State ,
Logger : a . proxyConfig . Logger . Named ( "agent-state" ) ,
Tokens : a . baseDeps . Tokens ,
NodeName : a . config . NodeName ,
} ,
)
2018-10-03 19:37:53 +00:00
2016-08-16 07:05:55 +00:00
// Start watching for critical services to deregister, based on their
// checks.
2017-05-19 09:53:41 +00:00
go a . reapServices ( )
2016-08-16 07:05:55 +00:00
// Start handling events.
2017-05-19 09:53:41 +00:00
go a . handleEvents ( )
2014-08-27 23:49:12 +00:00
2015-06-06 03:31:33 +00:00
// Start sending network coordinate to the server.
2017-05-19 09:53:41 +00:00
if ! c . DisableCoordinates {
go a . sendCoordinate ( )
2015-06-06 03:31:33 +00:00
}
2016-08-16 07:05:55 +00:00
// Write out the PID file if necessary.
2017-05-19 09:53:41 +00:00
if err := a . storePid ( ) ; err != nil {
return err
2014-05-06 16:57:53 +00:00
}
2014-05-06 03:29:50 +00:00
2017-05-24 13:22:56 +00:00
// start DNS servers
if err := a . listenAndServeDNS ( ) ; err != nil {
return err
}
2020-01-31 16:19:37 +00:00
// Configure the http connection limiter.
a . httpConnLimiter . SetConfig ( connlimit . Config {
MaxConnsPerClientIP : a . config . HTTPMaxConnsPerClient ,
} )
2017-11-07 23:06:59 +00:00
// Create listeners and unstarted servers; see comment on listenHTTP why
// we are doing this.
servers , err := a . listenHTTP ( )
2017-05-24 13:22:56 +00:00
if err != nil {
return err
}
2017-11-07 23:06:59 +00:00
// Start HTTP and HTTPS servers.
for _ , srv := range servers {
2020-07-02 17:31:47 +00:00
a . apiServers . Start ( srv )
2017-05-24 13:22:56 +00:00
}
2017-06-02 09:55:29 +00:00
2021-09-08 15:48:41 +00:00
// Start gRPC server.
if err := a . listenAndServeGRPC ( ) ; err != nil {
2018-10-03 19:37:53 +00:00
return err
}
2017-06-09 08:03:49 +00:00
// register watches
2017-06-24 19:52:41 +00:00
if err := a . reloadWatches ( a . config ) ; err != nil {
2017-06-09 08:03:49 +00:00
return err
}
2017-06-02 09:55:29 +00:00
// start retry join
2017-08-19 08:44:19 +00:00
go a . retryJoinLAN ( )
2020-03-09 20:59:02 +00:00
if a . config . ServerMode {
go a . retryJoinWAN ( )
}
2017-06-02 09:55:29 +00:00
2021-08-04 17:05:10 +00:00
if a . tlsConfigurator . Cert ( ) != nil {
2021-10-27 19:23:29 +00:00
m := tlsCertExpirationMonitor ( a . tlsConfigurator , a . logger )
2021-08-04 17:05:10 +00:00
go m . Monitor ( & lib . StopChannelContext { StopCh : a . shutdownCh } )
}
2020-12-09 14:16:53 +00:00
// consul version metric with labels
metrics . SetGaugeWithLabels ( [ ] string { "version" } , 1 , [ ] metrics . Label {
2022-05-05 02:16:18 +00:00
{ Name : "version" , Value : a . config . VersionWithMetadata ( ) } ,
2020-12-09 14:16:53 +00:00
{ Name : "pre_release" , Value : a . config . VersionPrerelease } ,
} )
2022-03-31 19:11:49 +00:00
// start a go routine to reload config based on file watcher events
2022-04-04 15:31:39 +00:00
if a . configFileWatcher != nil {
a . baseDeps . Logger . Debug ( "starting file watcher" )
a . configFileWatcher . Start ( context . Background ( ) )
go func ( ) {
for event := range a . configFileWatcher . EventsCh ( ) {
a . baseDeps . Logger . Debug ( "auto-reload config triggered" , "num-events" , len ( event . Filenames ) )
err := a . AutoReloadConfig ( )
if err != nil {
a . baseDeps . Logger . Error ( "error loading config" , "error" , err )
2022-03-31 19:11:49 +00:00
}
2022-04-04 15:31:39 +00:00
}
} ( )
2022-03-31 19:11:49 +00:00
}
2022-04-04 15:31:39 +00:00
2017-05-24 13:22:56 +00:00
return nil
}
2020-12-09 14:16:53 +00:00
var Gauges = [ ] prometheus . GaugeDefinition {
{
Name : [ ] string { "version" } ,
Help : "Represents the Consul version." ,
} ,
}
2020-07-02 17:31:47 +00:00
// Failed returns a channel which is closed when the first server goroutine exits
// with a non-nil error.
func ( a * Agent ) Failed ( ) <- chan struct { } {
return a . apiServers . failed
}
2022-07-13 15:33:48 +00:00
func ( a * Agent ) buildExternalGRPCServer ( ) {
2022-07-15 18:15:50 +00:00
a . externalGRPCServer = external . NewServer ( a . logger . Named ( "grpc.external" ) , a . tlsConfigurator )
2022-03-22 12:40:24 +00:00
}
2021-09-08 15:48:41 +00:00
func ( a * Agent ) listenAndServeGRPC ( ) error {
if len ( a . config . GRPCAddrs ) < 1 {
2018-10-03 19:37:53 +00:00
return nil
}
2022-05-27 11:38:52 +00:00
// TODO(agentless): rather than asserting the concrete type of delegate, we
// should add a method to the Delegate interface to build a ConfigSource.
var cfg xds . ProxyConfigSource = localproxycfg . NewConfigSource ( a . proxyConfig )
if server , ok := a . delegate . ( * consul . Server ) ; ok {
catalogCfg := catalogproxycfg . NewConfigSource ( catalogproxycfg . Config {
NodeName : a . config . NodeName ,
LocalState : a . State ,
LocalConfigSource : cfg ,
Manager : a . proxyConfig ,
GetStore : func ( ) catalogproxycfg . Store { return server . FSM ( ) . State ( ) } ,
Logger : a . proxyConfig . Logger . Named ( "server-catalog" ) ,
} )
go func ( ) {
<- a . shutdownCh
catalogCfg . Shutdown ( )
} ( )
cfg = catalogCfg
}
2022-03-22 12:40:24 +00:00
a . xdsServer = xds . NewServer (
2022-05-27 11:38:52 +00:00
a . config . NodeName ,
2021-04-29 18:54:05 +00:00
a . logger . Named ( logging . Envoy ) ,
2022-03-15 14:07:40 +00:00
a . config . ConnectServerlessPluginEnabled ,
2022-05-27 11:38:52 +00:00
cfg ,
2021-04-29 18:54:05 +00:00
func ( id string ) ( acl . Authorizer , error ) {
2021-04-14 16:39:35 +00:00
return a . delegate . ResolveTokenAndDefaultMeta ( id , nil , nil )
} ,
2021-04-29 18:54:05 +00:00
a ,
)
2022-07-13 15:33:48 +00:00
a . xdsServer . Register ( a . externalGRPCServer )
2018-10-03 19:37:53 +00:00
2021-09-08 15:48:41 +00:00
ln , err := a . startListeners ( a . config . GRPCAddrs )
2018-10-03 19:37:53 +00:00
if err != nil {
return err
}
for _ , l := range ln {
go func ( innerL net . Listener ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Started gRPC server" ,
"address" , innerL . Addr ( ) . String ( ) ,
"network" , innerL . Addr ( ) . Network ( ) ,
)
2022-07-13 15:33:48 +00:00
err := a . externalGRPCServer . Serve ( innerL )
2018-10-03 19:37:53 +00:00
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "gRPC server failed" , "error" , err )
2018-10-03 19:37:53 +00:00
}
} ( l )
}
return nil
}
2017-05-24 13:22:56 +00:00
func ( a * Agent ) listenAndServeDNS ( ) error {
2017-09-25 18:40:42 +00:00
notif := make ( chan net . Addr , len ( a . config . DNSAddrs ) )
2018-09-07 14:48:29 +00:00
errCh := make ( chan error , len ( a . config . DNSAddrs ) )
2017-09-25 18:40:42 +00:00
for _ , addr := range a . config . DNSAddrs {
2017-05-24 13:22:56 +00:00
// create server
s , err := NewDNSServer ( a )
2017-05-19 09:53:41 +00:00
if err != nil {
2017-05-24 13:22:56 +00:00
return err
2017-05-19 09:53:41 +00:00
}
2017-05-24 13:22:56 +00:00
a . dnsServers = append ( a . dnsServers , s )
// start server
a . wgServers . Add ( 1 )
2017-09-25 18:40:42 +00:00
go func ( addr net . Addr ) {
2017-05-24 13:22:56 +00:00
defer a . wgServers . Done ( )
2017-09-25 18:40:42 +00:00
err := s . ListenAndServe ( addr . Network ( ) , addr . String ( ) , func ( ) { notif <- addr } )
2017-05-24 13:22:56 +00:00
if err != nil && ! strings . Contains ( err . Error ( ) , "accept" ) {
2018-09-07 14:48:29 +00:00
errCh <- err
2017-05-24 13:22:56 +00:00
}
2017-09-25 18:40:42 +00:00
} ( addr )
2017-05-19 09:53:41 +00:00
}
2017-05-24 13:22:56 +00:00
// wait for servers to be up
timeout := time . After ( time . Second )
2018-09-07 14:48:29 +00:00
var merr * multierror . Error
2017-09-25 18:40:42 +00:00
for range a . config . DNSAddrs {
2017-05-24 13:22:56 +00:00
select {
2017-09-25 18:40:42 +00:00
case addr := <- notif :
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Started DNS server" ,
"address" , addr . String ( ) ,
"network" , addr . Network ( ) ,
)
2019-02-27 19:28:31 +00:00
2018-09-07 14:48:29 +00:00
case err := <- errCh :
merr = multierror . Append ( merr , err )
2017-05-24 13:22:56 +00:00
case <- timeout :
2018-09-07 14:48:29 +00:00
merr = multierror . Append ( merr , fmt . Errorf ( "agent: timeout starting DNS servers" ) )
2020-05-14 21:02:52 +00:00
return merr . ErrorOrNil ( )
2017-05-24 13:22:56 +00:00
}
}
2018-09-07 14:48:29 +00:00
return merr . ErrorOrNil ( )
2017-05-19 09:53:41 +00:00
}
2022-08-09 16:22:39 +00:00
// startListeners will return a net.Listener for every address unless an
// error is encountered, in which case it will close all previously opened
// listeners and return the error.
2018-10-03 19:37:53 +00:00
func ( a * Agent ) startListeners ( addrs [ ] net . Addr ) ( [ ] net . Listener , error ) {
2022-08-09 16:22:39 +00:00
var lns [ ] net . Listener
closeAll := func ( ) {
for _ , l := range lns {
l . Close ( )
}
}
2018-10-03 19:37:53 +00:00
for _ , addr := range addrs {
var l net . Listener
var err error
switch x := addr . ( type ) {
case * net . UnixAddr :
l , err = a . listenSocket ( x . Name )
if err != nil {
2022-08-09 16:22:39 +00:00
closeAll ( )
2018-10-03 19:37:53 +00:00
return nil , err
}
case * net . TCPAddr :
l , err = net . Listen ( "tcp" , x . String ( ) )
if err != nil {
2022-08-09 16:22:39 +00:00
closeAll ( )
2018-10-03 19:37:53 +00:00
return nil , err
}
l = & tcpKeepAliveListener { l . ( * net . TCPListener ) }
default :
2022-08-09 16:22:39 +00:00
closeAll ( )
2018-10-03 19:37:53 +00:00
return nil , fmt . Errorf ( "unsupported address type %T" , addr )
}
2022-08-09 16:22:39 +00:00
lns = append ( lns , l )
2018-10-03 19:37:53 +00:00
}
2022-08-09 16:22:39 +00:00
return lns , nil
2018-10-03 19:37:53 +00:00
}
2017-05-24 13:22:56 +00:00
// listenHTTP binds listeners to the provided addresses and also returns
// pre-configured HTTP servers which are not yet started. The motivation is
// that in the current startup/shutdown setup we de-couple the listener
// creation from the server startup assuming that if any of the listeners
// cannot be bound we fail immediately and later failures do not occur.
// Therefore, starting a server with a running listener is assumed to not
// produce an error.
//
// The second motivation is that an HTTPS server needs to use the same TLSConfig
// on both the listener and the HTTP server. When listeners and servers are
// created at different times this becomes difficult to handle without keeping
// the TLS configuration somewhere or recreating it.
//
// This approach should ultimately be refactored to the point where we just
// start the server and any error should trigger a proper shutdown of the agent.
2020-07-02 17:31:47 +00:00
func ( a * Agent ) listenHTTP ( ) ( [ ] apiServer , error ) {
2017-05-19 09:53:41 +00:00
var ln [ ] net . Listener
2020-07-02 17:31:47 +00:00
var servers [ ] apiServer
2017-09-25 18:40:42 +00:00
start := func ( proto string , addrs [ ] net . Addr ) error {
2018-10-03 19:37:53 +00:00
listeners , err := a . startListeners ( addrs )
if err != nil {
return err
}
2020-07-02 17:31:47 +00:00
ln = append ( ln , listeners ... )
2017-05-24 13:22:56 +00:00
2018-10-03 19:37:53 +00:00
for _ , l := range listeners {
var tlscfg * tls . Config
_ , isTCP := l . ( * tcpKeepAliveListener )
if isTCP && proto == "https" {
2019-03-13 09:29:06 +00:00
tlscfg = a . tlsConfigurator . IncomingHTTPSConfig ( )
2018-10-03 19:37:53 +00:00
l = tls . NewListener ( l , tlscfg )
2017-05-19 09:53:41 +00:00
}
2020-01-31 16:19:37 +00:00
2020-09-04 18:42:15 +00:00
srv := & HTTPHandlers {
2020-05-29 18:19:16 +00:00
agent : a ,
denylist : NewDenylist ( a . config . HTTPBlockEndpoints ) ,
2017-11-07 23:06:59 +00:00
}
2020-09-23 11:37:33 +00:00
a . configReloaders = append ( a . configReloaders , srv . ReloadConfig )
a . httpHandlers = srv
2020-07-02 17:31:47 +00:00
httpServer := & http . Server {
2020-10-29 17:38:19 +00:00
Addr : l . Addr ( ) . String ( ) ,
TLSConfig : tlscfg ,
Handler : srv . handler ( a . config . EnableDebug ) ,
MaxHeaderBytes : a . config . HTTPMaxHeaderBytes ,
2020-07-02 17:31:47 +00:00
}
2017-11-07 23:06:59 +00:00
2020-01-31 16:19:37 +00:00
// Load the connlimit helper into the server
2020-07-03 07:25:07 +00:00
connLimitFn := a . httpConnLimiter . HTTPConnStateFuncWithDefault429Handler ( 10 * time . Millisecond )
2020-01-31 16:19:37 +00:00
2017-11-07 23:06:59 +00:00
if proto == "https" {
2020-07-02 21:51:25 +00:00
if err := setupHTTPS ( httpServer , connLimitFn , a . config . HTTPSHandshakeTimeout ) ; err != nil {
2017-11-07 23:06:59 +00:00
return err
}
2020-01-31 16:19:37 +00:00
} else {
2020-07-02 20:47:54 +00:00
httpServer . ConnState = connLimitFn
2017-11-07 23:06:59 +00:00
}
2020-11-18 17:22:07 +00:00
servers = append ( servers , newAPIServerHTTP ( proto , l , httpServer ) )
2017-05-19 09:53:41 +00:00
}
2017-09-25 18:40:42 +00:00
return nil
}
2017-05-24 13:22:56 +00:00
2017-09-25 18:40:42 +00:00
if err := start ( "http" , a . config . HTTPAddrs ) ; err != nil {
2020-07-02 17:31:47 +00:00
closeListeners ( ln )
2017-09-25 18:40:42 +00:00
return nil , err
}
if err := start ( "https" , a . config . HTTPSAddrs ) ; err != nil {
2020-07-02 17:31:47 +00:00
closeListeners ( ln )
2017-09-25 18:40:42 +00:00
return nil , err
2017-05-19 09:53:41 +00:00
}
2017-11-07 23:06:59 +00:00
return servers , nil
2017-05-24 13:22:56 +00:00
}
2017-05-19 09:53:41 +00:00
2020-07-02 17:31:47 +00:00
func closeListeners ( lns [ ] net . Listener ) {
for _ , l := range lns {
l . Close ( )
}
}
2020-07-02 21:51:25 +00:00
// setupHTTPS adds HTTP/2 support, ConnState, and a connection handshake timeout
// to the http.Server.
func setupHTTPS ( server * http . Server , connState func ( net . Conn , http . ConnState ) , timeout time . Duration ) error {
// Enforce TLS handshake timeout
server . ConnState = func ( conn net . Conn , state http . ConnState ) {
switch state {
case http . StateNew :
// Set deadline to prevent slow send before TLS handshake or first
// byte of request.
conn . SetReadDeadline ( time . Now ( ) . Add ( timeout ) )
case http . StateActive :
// Clear read deadline. We should maybe set read timeouts more
// generally but that's a bigger task as some HTTP endpoints may
// stream large requests and responses (e.g. snapshot) so we can't
// set sensible blanket timeouts here.
conn . SetReadDeadline ( time . Time { } )
}
// Pass through to conn limit. This is OK because we didn't change
// state (i.e. Close conn).
connState ( conn , state )
}
// This will enable upgrading connections to HTTP/2 as
// part of TLS negotiation.
return http2 . ConfigureServer ( server , nil )
}
2017-05-30 23:05:21 +00:00
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
2017-11-07 23:06:59 +00:00
// connections. It's used so dead TCP connections eventually go away.
2017-05-30 23:05:21 +00:00
type tcpKeepAliveListener struct {
* net . TCPListener
}
func ( ln tcpKeepAliveListener ) Accept ( ) ( c net . Conn , err error ) {
tc , err := ln . AcceptTCP ( )
if err != nil {
return
}
tc . SetKeepAlive ( true )
tc . SetKeepAlivePeriod ( 30 * time . Second )
return tc , nil
}
2017-09-25 18:40:42 +00:00
func ( a * Agent ) listenSocket ( path string ) ( net . Listener , error ) {
2017-05-24 13:22:56 +00:00
if _ , err := os . Stat ( path ) ; ! os . IsNotExist ( err ) {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Replacing socket" , "path" , path )
2017-05-24 13:22:56 +00:00
}
if err := os . Remove ( path ) ; err != nil && ! os . IsNotExist ( err ) {
return nil , fmt . Errorf ( "error removing socket file: %s" , err )
}
l , err := net . Listen ( "unix" , path )
if err != nil {
return nil , err
}
2017-09-25 18:40:42 +00:00
user , group , mode := a . config . UnixSocketUser , a . config . UnixSocketGroup , a . config . UnixSocketMode
if err := setFilePermissions ( path , user , group , mode ) ; err != nil {
return nil , fmt . Errorf ( "Failed setting up socket: %s" , err )
2017-05-24 13:22:56 +00:00
}
return l , nil
}
2020-05-26 08:01:49 +00:00
// stopAllWatches stops all the currently running watches
func ( a * Agent ) stopAllWatches ( ) {
for _ , wp := range a . watchPlans {
wp . Stop ( )
}
}
2017-06-24 19:52:41 +00:00
// reloadWatches stops any existing watch plans and attempts to load the given
// set of watches.
2017-09-25 18:40:42 +00:00
func ( a * Agent ) reloadWatches ( cfg * config . RuntimeConfig ) error {
2017-09-26 20:47:27 +00:00
// Stop the current watches.
2020-05-26 08:01:49 +00:00
a . stopAllWatches ( )
2017-09-26 20:47:27 +00:00
a . watchPlans = nil
// Return if there are no watches now.
if len ( cfg . Watches ) == 0 {
return nil
}
2017-06-24 19:52:41 +00:00
// Watches use the API to talk to this agent, so that must be enabled.
2017-09-26 20:47:27 +00:00
if len ( cfg . HTTPAddrs ) == 0 && len ( cfg . HTTPSAddrs ) == 0 {
2017-06-09 08:03:49 +00:00
return fmt . Errorf ( "watch plans require an HTTP or HTTPS endpoint" )
}
2017-09-25 18:40:42 +00:00
// Compile the watches
var watchPlans [ ] * watch . Plan
for _ , params := range cfg . Watches {
2017-10-22 01:39:09 +00:00
if handlerType , ok := params [ "handler_type" ] ; ! ok {
params [ "handler_type" ] = "script"
} else if handlerType != "http" && handlerType != "script" {
return fmt . Errorf ( "Handler type '%s' not recognized" , params [ "handler_type" ] )
}
2018-04-26 17:06:26 +00:00
// Don't let people use connect watches via this mechanism for now as it
// needs thought about how to do securely and shouldn't be necessary. Note
// that if the type assertion fails an type is not a string then
// ParseExample below will error so we don't need to handle that case.
if typ , ok := params [ "type" ] . ( string ) ; ok {
if strings . HasPrefix ( typ , "connect_" ) {
return fmt . Errorf ( "Watch type %s is not allowed in agent config" , typ )
}
}
2020-07-10 17:33:45 +00:00
wp , err := makeWatchPlan ( a . logger , params )
2017-09-25 18:40:42 +00:00
if err != nil {
2020-07-10 17:33:45 +00:00
return err
2017-10-04 23:48:00 +00:00
}
2017-09-25 18:40:42 +00:00
watchPlans = append ( watchPlans , wp )
}
2017-06-24 19:52:41 +00:00
// Fire off a goroutine for each new watch plan.
2017-09-25 18:40:42 +00:00
for _ , wp := range watchPlans {
2018-07-16 20:30:15 +00:00
config , err := a . config . APIConfig ( true )
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "Failed to run watch" , "error" , err )
2018-07-16 20:30:15 +00:00
continue
}
2017-06-24 19:52:41 +00:00
a . watchPlans = append ( a . watchPlans , wp )
2017-06-09 08:03:49 +00:00
go func ( wp * watch . Plan ) {
2017-10-04 23:48:00 +00:00
if h , ok := wp . Exempt [ "handler" ] ; ok {
2020-01-28 23:50:41 +00:00
wp . Handler = makeWatchHandler ( a . logger , h )
2017-10-22 01:39:09 +00:00
} else if h , ok := wp . Exempt [ "args" ] ; ok {
2020-01-28 23:50:41 +00:00
wp . Handler = makeWatchHandler ( a . logger , h )
2017-10-04 23:48:00 +00:00
} else {
2017-10-22 01:39:09 +00:00
httpConfig := wp . Exempt [ "http_handler_config" ] . ( * watch . HttpHandlerConfig )
2020-01-28 23:50:41 +00:00
wp . Handler = makeHTTPWatchHandler ( a . logger , httpConfig )
2017-10-04 23:48:00 +00:00
}
2020-07-29 18:33:36 +00:00
wp . Logger = a . logger . Named ( "watch" )
2018-05-31 21:07:36 +00:00
2018-07-16 20:30:15 +00:00
addr := config . Address
if config . Scheme == "https" {
addr = "https://" + addr
2018-05-31 21:07:36 +00:00
}
2018-06-01 00:22:14 +00:00
if err := wp . RunWithConfig ( addr , config ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "Failed to run watch" , "error" , err )
2017-06-09 08:03:49 +00:00
}
} ( wp )
}
return nil
}
2020-07-29 17:49:52 +00:00
// newConsulConfig translates a RuntimeConfig into a consul.Config.
2020-08-11 00:20:06 +00:00
// TODO: move this function to a different file, maybe config.go
2020-08-11 16:20:46 +00:00
func newConsulConfig ( runtimeCfg * config . RuntimeConfig , logger hclog . Logger ) ( * consul . Config , error ) {
cfg := consul . DefaultConfig ( )
2017-06-30 09:09:52 +00:00
2017-01-18 06:20:11 +00:00
// This is set when the agent starts up
2020-08-11 16:20:46 +00:00
cfg . NodeID = runtimeCfg . NodeID
2017-01-18 06:20:11 +00:00
2015-11-29 04:40:05 +00:00
// Apply dev mode
2020-08-11 16:20:46 +00:00
cfg . DevMode = runtimeCfg . DevMode
2015-11-29 04:40:05 +00:00
2020-08-11 16:20:46 +00:00
// Override with our runtimeCfg
// todo(fs): these are now always set in the runtime runtimeCfg so we can simplify this
2017-09-25 18:40:42 +00:00
// todo(fs): or is there a reason to keep it like that?
2020-08-11 16:20:46 +00:00
cfg . Datacenter = runtimeCfg . Datacenter
cfg . PrimaryDatacenter = runtimeCfg . PrimaryDatacenter
cfg . DataDir = runtimeCfg . DataDir
cfg . NodeName = runtimeCfg . NodeName
2021-08-06 22:59:05 +00:00
cfg . ACLResolverSettings = runtimeCfg . ACLResolverSettings
2020-08-11 16:20:46 +00:00
cfg . CoordinateUpdateBatchSize = runtimeCfg . ConsulCoordinateUpdateBatchSize
cfg . CoordinateUpdateMaxBatches = runtimeCfg . ConsulCoordinateUpdateMaxBatches
cfg . CoordinateUpdatePeriod = runtimeCfg . ConsulCoordinateUpdatePeriod
cfg . CheckOutputMaxSize = runtimeCfg . CheckOutputMaxSize
cfg . RaftConfig . HeartbeatTimeout = runtimeCfg . ConsulRaftHeartbeatTimeout
cfg . RaftConfig . LeaderLeaseTimeout = runtimeCfg . ConsulRaftLeaderLeaseTimeout
cfg . RaftConfig . ElectionTimeout = runtimeCfg . ConsulRaftElectionTimeout
cfg . SerfLANConfig . MemberlistConfig . BindAddr = runtimeCfg . SerfBindAddrLAN . IP . String ( )
cfg . SerfLANConfig . MemberlistConfig . BindPort = runtimeCfg . SerfBindAddrLAN . Port
cfg . SerfLANConfig . MemberlistConfig . CIDRsAllowed = runtimeCfg . SerfAllowedCIDRsLAN
cfg . SerfWANConfig . MemberlistConfig . CIDRsAllowed = runtimeCfg . SerfAllowedCIDRsWAN
cfg . SerfLANConfig . MemberlistConfig . AdvertiseAddr = runtimeCfg . SerfAdvertiseAddrLAN . IP . String ( )
cfg . SerfLANConfig . MemberlistConfig . AdvertisePort = runtimeCfg . SerfAdvertiseAddrLAN . Port
2022-03-31 19:11:49 +00:00
cfg . SerfLANConfig . MemberlistConfig . GossipVerifyIncoming = runtimeCfg . StaticRuntimeConfig . EncryptVerifyIncoming
cfg . SerfLANConfig . MemberlistConfig . GossipVerifyOutgoing = runtimeCfg . StaticRuntimeConfig . EncryptVerifyOutgoing
2020-08-11 16:20:46 +00:00
cfg . SerfLANConfig . MemberlistConfig . GossipInterval = runtimeCfg . GossipLANGossipInterval
cfg . SerfLANConfig . MemberlistConfig . GossipNodes = runtimeCfg . GossipLANGossipNodes
cfg . SerfLANConfig . MemberlistConfig . ProbeInterval = runtimeCfg . GossipLANProbeInterval
cfg . SerfLANConfig . MemberlistConfig . ProbeTimeout = runtimeCfg . GossipLANProbeTimeout
cfg . SerfLANConfig . MemberlistConfig . SuspicionMult = runtimeCfg . GossipLANSuspicionMult
cfg . SerfLANConfig . MemberlistConfig . RetransmitMult = runtimeCfg . GossipLANRetransmitMult
if runtimeCfg . ReconnectTimeoutLAN != 0 {
cfg . SerfLANConfig . ReconnectTimeout = runtimeCfg . ReconnectTimeoutLAN
}
if runtimeCfg . SerfBindAddrWAN != nil {
cfg . SerfWANConfig . MemberlistConfig . BindAddr = runtimeCfg . SerfBindAddrWAN . IP . String ( )
cfg . SerfWANConfig . MemberlistConfig . BindPort = runtimeCfg . SerfBindAddrWAN . Port
cfg . SerfWANConfig . MemberlistConfig . AdvertiseAddr = runtimeCfg . SerfAdvertiseAddrWAN . IP . String ( )
cfg . SerfWANConfig . MemberlistConfig . AdvertisePort = runtimeCfg . SerfAdvertiseAddrWAN . Port
2022-03-31 19:11:49 +00:00
cfg . SerfWANConfig . MemberlistConfig . GossipVerifyIncoming = runtimeCfg . StaticRuntimeConfig . EncryptVerifyIncoming
cfg . SerfWANConfig . MemberlistConfig . GossipVerifyOutgoing = runtimeCfg . StaticRuntimeConfig . EncryptVerifyOutgoing
2020-08-11 16:20:46 +00:00
cfg . SerfWANConfig . MemberlistConfig . GossipInterval = runtimeCfg . GossipWANGossipInterval
cfg . SerfWANConfig . MemberlistConfig . GossipNodes = runtimeCfg . GossipWANGossipNodes
cfg . SerfWANConfig . MemberlistConfig . ProbeInterval = runtimeCfg . GossipWANProbeInterval
cfg . SerfWANConfig . MemberlistConfig . ProbeTimeout = runtimeCfg . GossipWANProbeTimeout
cfg . SerfWANConfig . MemberlistConfig . SuspicionMult = runtimeCfg . GossipWANSuspicionMult
cfg . SerfWANConfig . MemberlistConfig . RetransmitMult = runtimeCfg . GossipWANRetransmitMult
if runtimeCfg . ReconnectTimeoutWAN != 0 {
cfg . SerfWANConfig . ReconnectTimeout = runtimeCfg . ReconnectTimeoutWAN
2018-08-17 18:44:25 +00:00
}
2018-03-26 19:21:06 +00:00
} else {
// Disable serf WAN federation
2020-08-11 16:20:46 +00:00
cfg . SerfWANConfig = nil
2018-03-26 19:21:06 +00:00
}
2017-09-25 18:40:42 +00:00
2020-10-08 19:02:19 +00:00
cfg . AdvertiseReconnectTimeout = runtimeCfg . AdvertiseReconnectTimeout
2020-08-11 16:20:46 +00:00
cfg . RPCAddr = runtimeCfg . RPCBindAddr
cfg . RPCAdvertise = runtimeCfg . RPCAdvertiseAddr
2017-09-25 18:40:42 +00:00
2022-07-07 18:55:41 +00:00
cfg . GRPCPort = runtimeCfg . GRPCPort
2020-08-11 16:20:46 +00:00
cfg . Segment = runtimeCfg . SegmentName
if len ( runtimeCfg . Segments ) > 0 {
segments , err := segmentConfig ( runtimeCfg )
2017-08-29 00:58:22 +00:00
if err != nil {
return nil , err
2017-08-14 14:36:07 +00:00
}
2020-08-11 16:20:46 +00:00
cfg . Segments = segments
2017-08-14 14:36:07 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . Bootstrap {
cfg . Bootstrap = true
2013-12-25 00:48:07 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . CheckOutputMaxSize > 0 {
cfg . CheckOutputMaxSize = runtimeCfg . CheckOutputMaxSize
2019-06-26 15:43:25 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RejoinAfterLeave {
cfg . RejoinAfterLeave = true
2014-06-18 17:32:19 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . BootstrapExpect != 0 {
cfg . BootstrapExpect = runtimeCfg . BootstrapExpect
2014-06-16 21:36:12 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RPCProtocol > 0 {
cfg . ProtocolVersion = uint8 ( runtimeCfg . RPCProtocol )
2014-03-09 22:57:03 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RaftProtocol != 0 {
cfg . RaftConfig . ProtocolVersion = raft . ProtocolVersion ( runtimeCfg . RaftProtocol )
2017-02-24 04:32:13 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RaftSnapshotThreshold != 0 {
cfg . RaftConfig . SnapshotThreshold = uint64 ( runtimeCfg . RaftSnapshotThreshold )
2018-05-10 15:16:38 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RaftSnapshotInterval != 0 {
cfg . RaftConfig . SnapshotInterval = runtimeCfg . RaftSnapshotInterval
2018-05-10 22:06:47 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RaftTrailingLogs != 0 {
cfg . RaftConfig . TrailingLogs = uint64 ( runtimeCfg . RaftTrailingLogs )
2019-07-23 14:19:57 +00:00
}
2021-12-07 12:39:28 +00:00
if runtimeCfg . ACLInitialManagementToken != "" {
cfg . ACLInitialManagementToken = runtimeCfg . ACLInitialManagementToken
2014-08-05 22:36:08 +00:00
}
2020-08-11 16:20:46 +00:00
cfg . ACLTokenReplication = runtimeCfg . ACLTokenReplication
cfg . ACLsEnabled = runtimeCfg . ACLsEnabled
if runtimeCfg . ACLEnableKeyListPolicy {
cfg . ACLEnableKeyListPolicy = runtimeCfg . ACLEnableKeyListPolicy
2017-10-02 22:10:21 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . SessionTTLMin != 0 {
cfg . SessionTTLMin = runtimeCfg . SessionTTLMin
2015-03-27 05:30:04 +00:00
}
2020-11-17 15:53:57 +00:00
if runtimeCfg . ReadReplica {
cfg . ReadReplica = runtimeCfg . ReadReplica
2017-03-21 23:36:44 +00:00
}
2017-12-13 18:31:45 +00:00
// These are fully specified in the agent defaults, so we can simply
// copy them over.
2020-08-11 16:20:46 +00:00
cfg . AutopilotConfig . CleanupDeadServers = runtimeCfg . AutopilotCleanupDeadServers
cfg . AutopilotConfig . LastContactThreshold = runtimeCfg . AutopilotLastContactThreshold
cfg . AutopilotConfig . MaxTrailingLogs = uint64 ( runtimeCfg . AutopilotMaxTrailingLogs )
cfg . AutopilotConfig . MinQuorum = runtimeCfg . AutopilotMinQuorum
cfg . AutopilotConfig . ServerStabilizationTime = runtimeCfg . AutopilotServerStabilizationTime
cfg . AutopilotConfig . RedundancyZoneTag = runtimeCfg . AutopilotRedundancyZoneTag
cfg . AutopilotConfig . DisableUpgradeMigration = runtimeCfg . AutopilotDisableUpgradeMigration
cfg . AutopilotConfig . UpgradeVersionTag = runtimeCfg . AutopilotUpgradeVersionTag
2013-12-20 23:33:13 +00:00
2017-05-03 20:59:06 +00:00
// make sure the advertise address is always set
2020-08-11 16:20:46 +00:00
if cfg . RPCAdvertise == nil {
cfg . RPCAdvertise = cfg . RPCAddr
2017-05-03 20:59:06 +00:00
}
2017-09-01 22:02:50 +00:00
// Rate limiting for RPC calls.
2020-08-11 16:20:46 +00:00
if runtimeCfg . RPCRateLimit > 0 {
2020-09-16 17:29:59 +00:00
cfg . RPCRateLimit = runtimeCfg . RPCRateLimit
2017-09-01 22:02:50 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RPCMaxBurst > 0 {
cfg . RPCMaxBurst = runtimeCfg . RPCMaxBurst
2017-09-01 22:02:50 +00:00
}
2020-01-31 16:19:37 +00:00
// RPC timeouts/limits.
2020-08-11 16:20:46 +00:00
if runtimeCfg . RPCHandshakeTimeout > 0 {
cfg . RPCHandshakeTimeout = runtimeCfg . RPCHandshakeTimeout
2017-10-10 22:19:50 +00:00
}
2020-08-11 16:20:46 +00:00
if runtimeCfg . RPCMaxConnsPerClient > 0 {
cfg . RPCMaxConnsPerClient = runtimeCfg . RPCMaxConnsPerClient
2020-01-31 16:19:37 +00:00
}
// RPC-related performance configs. We allow explicit zero value to disable so
// copy it whatever the value.
2020-08-11 16:20:46 +00:00
cfg . RPCHoldTimeout = runtimeCfg . RPCHoldTimeout
2020-01-31 16:19:37 +00:00
2020-10-05 20:28:13 +00:00
cfg . RPCConfig = runtimeCfg . RPCConfig
2020-08-11 16:20:46 +00:00
if runtimeCfg . LeaveDrainTime > 0 {
cfg . LeaveDrainTime = runtimeCfg . LeaveDrainTime
2017-10-10 22:19:50 +00:00
}
2017-05-03 10:57:11 +00:00
// set the src address for outgoing rpc connections
2017-05-10 07:30:19 +00:00
// Use port 0 so that outgoing connections use a random port.
2020-08-11 16:20:46 +00:00
if ! ipaddr . IsAny ( cfg . RPCAddr . IP ) {
cfg . RPCSrcAddr = & net . TCPAddr { IP : cfg . RPCAddr . IP }
2017-05-10 07:30:19 +00:00
}
2017-05-03 10:57:11 +00:00
2014-06-06 22:36:40 +00:00
// Format the build string
2020-08-11 16:20:46 +00:00
revision := runtimeCfg . Revision
2014-06-06 22:36:40 +00:00
if len ( revision ) > 8 {
revision = revision [ : 8 ]
}
2022-05-05 02:16:18 +00:00
cfg . Build = fmt . Sprintf ( "%s%s:%s" , runtimeCfg . VersionWithMetadata ( ) , runtimeCfg . VersionPrerelease , revision )
2014-06-06 22:36:40 +00:00
2022-03-18 10:46:58 +00:00
cfg . TLSConfig = runtimeCfg . TLS
2021-07-09 22:17:42 +00:00
2020-08-11 16:20:46 +00:00
cfg . DefaultQueryTime = runtimeCfg . DefaultQueryTime
cfg . MaxQueryTime = runtimeCfg . MaxQueryTime
cfg . AutoEncryptAllowTLS = runtimeCfg . AutoEncryptAllowTLS
// Copy the Connect CA bootstrap runtimeCfg
if runtimeCfg . ConnectEnabled {
cfg . ConnectEnabled = true
cfg . ConnectMeshGatewayWANFederationEnabled = runtimeCfg . ConnectMeshGatewayWANFederationEnabled
ca , err := runtimeCfg . ConnectCAConfiguration ( )
2020-07-23 20:05:28 +00:00
if err != nil {
return nil , err
2019-01-22 17:19:36 +00:00
}
2018-04-25 18:34:08 +00:00
2020-08-11 16:20:46 +00:00
cfg . CAConfig = ca
2018-04-25 18:34:08 +00:00
}
2020-08-11 16:20:46 +00:00
// copy over auto runtimeCfg settings
cfg . AutoConfigEnabled = runtimeCfg . AutoConfig . Enabled
cfg . AutoConfigIntroToken = runtimeCfg . AutoConfig . IntroToken
cfg . AutoConfigIntroTokenFile = runtimeCfg . AutoConfig . IntroTokenFile
cfg . AutoConfigServerAddresses = runtimeCfg . AutoConfig . ServerAddresses
cfg . AutoConfigDNSSANs = runtimeCfg . AutoConfig . DNSSANs
cfg . AutoConfigIPSANs = runtimeCfg . AutoConfig . IPSANs
cfg . AutoConfigAuthzEnabled = runtimeCfg . AutoConfig . Authorizer . Enabled
cfg . AutoConfigAuthzAuthMethod = runtimeCfg . AutoConfig . Authorizer . AuthMethod
cfg . AutoConfigAuthzClaimAssertions = runtimeCfg . AutoConfig . Authorizer . ClaimAssertions
cfg . AutoConfigAuthzAllowReuse = runtimeCfg . AutoConfig . Authorizer . AllowReuse
2014-08-27 23:49:12 +00:00
2017-09-07 19:17:20 +00:00
// This will set up the LAN keyring, as well as the WAN and any segments
// for servers.
2020-08-11 00:20:06 +00:00
// TODO: move this closer to where the keyrings will be used.
2020-08-11 16:20:46 +00:00
if err := setupKeyrings ( cfg , runtimeCfg , logger ) ; err != nil {
2017-07-17 19:48:45 +00:00
return nil , fmt . Errorf ( "Failed to configure keyring: %v" , err )
2017-06-29 12:35:55 +00:00
}
2020-08-11 16:20:46 +00:00
cfg . ConfigEntryBootstrap = runtimeCfg . ConfigEntryBootstrap
2021-11-17 23:15:19 +00:00
cfg . RaftBoltDBConfig = runtimeCfg . RaftBoltDBConfig
2020-04-28 13:44:26 +00:00
2021-10-28 21:11:26 +00:00
// Duplicate our own serf config once to make sure that the duplication
// function does not drift.
cfg . SerfLANConfig = consul . CloneSerfLANConfig ( cfg . SerfLANConfig )
2022-07-22 22:20:21 +00:00
cfg . PeeringEnabled = runtimeCfg . PeeringEnabled
2022-07-29 21:36:22 +00:00
cfg . PeeringTestAllowPeerRegistrations = runtimeCfg . PeeringTestAllowPeerRegistrations
2022-07-22 22:20:21 +00:00
2020-08-11 16:20:46 +00:00
enterpriseConsulConfig ( cfg , runtimeCfg )
return cfg , nil
2013-12-20 23:33:13 +00:00
}
2017-08-29 00:58:22 +00:00
// Setup the serf and memberlist config for any defined network segments.
2020-07-29 17:49:52 +00:00
func segmentConfig ( config * config . RuntimeConfig ) ( [ ] consul . NetworkSegment , error ) {
2017-09-07 23:37:11 +00:00
var segments [ ] consul . NetworkSegment
2017-08-29 00:58:22 +00:00
2017-09-25 18:40:42 +00:00
for _ , s := range config . Segments {
2021-11-15 15:51:14 +00:00
// TODO: use consul.CloneSerfLANConfig(config.SerfLANConfig) here?
2017-08-29 00:58:22 +00:00
serfConf := consul . DefaultConfig ( ) . SerfLANConfig
2017-09-25 18:40:42 +00:00
serfConf . MemberlistConfig . BindAddr = s . Bind . IP . String ( )
serfConf . MemberlistConfig . BindPort = s . Bind . Port
serfConf . MemberlistConfig . AdvertiseAddr = s . Advertise . IP . String ( )
serfConf . MemberlistConfig . AdvertisePort = s . Advertise . Port
2021-11-04 22:17:19 +00:00
serfConf . MemberlistConfig . CIDRsAllowed = config . SerfAllowedCIDRsLAN
2017-08-30 19:51:10 +00:00
2017-09-25 18:40:42 +00:00
if config . ReconnectTimeoutLAN != 0 {
serfConf . ReconnectTimeout = config . ReconnectTimeoutLAN
2017-08-29 00:58:22 +00:00
}
2022-03-31 19:11:49 +00:00
if config . StaticRuntimeConfig . EncryptVerifyIncoming {
serfConf . MemberlistConfig . GossipVerifyIncoming = config . StaticRuntimeConfig . EncryptVerifyIncoming
2017-08-29 00:58:22 +00:00
}
2022-03-31 19:11:49 +00:00
if config . StaticRuntimeConfig . EncryptVerifyOutgoing {
serfConf . MemberlistConfig . GossipVerifyOutgoing = config . StaticRuntimeConfig . EncryptVerifyOutgoing
2017-08-29 00:58:22 +00:00
}
var rpcAddr * net . TCPAddr
2017-09-25 18:40:42 +00:00
if s . RPCListener {
2017-08-29 00:58:22 +00:00
rpcAddr = & net . TCPAddr {
2017-09-25 18:40:42 +00:00
IP : s . Bind . IP ,
2020-07-29 17:49:52 +00:00
Port : config . ServerPort ,
2017-08-29 00:58:22 +00:00
}
}
2017-09-07 23:37:11 +00:00
segments = append ( segments , consul . NetworkSegment {
2017-09-25 18:40:42 +00:00
Name : s . Name ,
2017-08-30 23:44:04 +00:00
Bind : serfConf . MemberlistConfig . BindAddr ,
Advertise : serfConf . MemberlistConfig . AdvertiseAddr ,
2017-09-25 18:40:42 +00:00
Port : s . Bind . Port ,
2017-08-29 00:58:22 +00:00
RPCAddr : rpcAddr ,
SerfConfig : serfConf ,
} )
}
return segments , nil
}
2017-06-19 14:36:09 +00:00
// registerEndpoint registers a handler for the consul RPC server
2017-06-16 07:54:09 +00:00
// under a unique name while making it accessible under the provided
// name. This allows overwriting handlers for the golang net/rpc
// service which does not allow this.
2017-06-19 14:36:09 +00:00
func ( a * Agent ) registerEndpoint ( name string , handler interface { } ) error {
2017-06-16 07:54:09 +00:00
srv , ok := a . delegate . ( * consul . Server )
if ! ok {
panic ( "agent must be a server" )
}
realname := fmt . Sprintf ( "%s-%d" , name , time . Now ( ) . UnixNano ( ) )
a . endpointsLock . Lock ( )
a . endpoints [ name ] = realname
a . endpointsLock . Unlock ( )
return srv . RegisterEndpoint ( realname , handler )
}
2013-12-20 23:33:13 +00:00
// RPC is used to make an RPC call to the Consul servers
// This allows the agent to implement the Consul.Interface
func ( a * Agent ) RPC ( method string , args interface { } , reply interface { } ) error {
2017-08-10 01:51:55 +00:00
a . endpointsLock . RLock ( )
2017-06-16 07:54:09 +00:00
// fast path: only translate if there are overrides
if len ( a . endpoints ) > 0 {
p := strings . SplitN ( method , "." , 2 )
if e := a . endpoints [ p [ 0 ] ] ; e != "" {
method = e + "." + p [ 1 ]
}
}
2017-08-10 01:51:55 +00:00
a . endpointsLock . RUnlock ( )
2017-05-15 14:05:17 +00:00
return a . delegate . RPC ( method , args , reply )
2013-12-20 23:33:13 +00:00
}
2014-04-18 05:46:31 +00:00
// Leave is used to prepare the agent for a graceful shutdown
2013-12-20 01:14:46 +00:00
func ( a * Agent ) Leave ( ) error {
2017-05-15 14:05:17 +00:00
return a . delegate . Leave ( )
2013-12-20 01:14:46 +00:00
}
2017-06-20 07:29:20 +00:00
// ShutdownAgent is used to hard stop the agent. Should be preceded by
// Leave to do it gracefully. Should be followed by ShutdownEndpoints to
// terminate the HTTP and DNS servers as well.
func (a *Agent) ShutdownAgent() error {
	a.shutdownLock.Lock()
	defer a.shutdownLock.Unlock()

	// Idempotent: a second call after shutdown completes is a no-op.
	if a.shutdown {
		return nil
	}
	a.logger.Info("Requesting shutdown")

	// Stop the watches to avoid any notification/state change during shutdown
	a.stopAllWatches()

	// Stop config file watcher
	if a.configFileWatcher != nil {
		a.configFileWatcher.Stop()
	}

	a.stopLicenseManager()

	// this would be cancelled anyways (by the closing of the shutdown ch) but
	// this should help them to be stopped more quickly
	a.baseDeps.AutoConfig.Stop()
	a.baseDeps.MetricsConfig.Cancel()

	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	// Stop the service manager (must happen before we take the stateLock to avoid deadlock)
	if a.serviceManager != nil {
		a.serviceManager.Stop()
	}

	// Stop all the checks, one loop per check type the agent tracks.
	for _, chk := range a.checkMonitors {
		chk.Stop()
	}
	for _, chk := range a.checkTTLs {
		chk.Stop()
	}
	for _, chk := range a.checkHTTPs {
		chk.Stop()
	}
	for _, chk := range a.checkTCPs {
		chk.Stop()
	}
	for _, chk := range a.checkUDPs {
		chk.Stop()
	}
	for _, chk := range a.checkGRPCs {
		chk.Stop()
	}
	for _, chk := range a.checkDockers {
		chk.Stop()
	}
	for _, chk := range a.checkAliases {
		chk.Stop()
	}
	for _, chk := range a.checkH2PINGs {
		chk.Stop()
	}

	// Stop gRPC
	a.externalGRPCServer.Stop()

	// Stop the proxy config manager
	if a.proxyConfig != nil {
		a.proxyConfig.Close()
	}

	// Stop the cache background work
	if a.cache != nil {
		a.cache.Close()
	}

	a.rpcClientHealth.Close()

	// Shut down the RPC delegate (server or client) last; its error, if any,
	// is the one reported to the caller.
	var err error
	if a.delegate != nil {
		err = a.delegate.Shutdown()
		if _, ok := a.delegate.(*consul.Server); ok {
			a.logger.Info("consul server down")
		} else {
			a.logger.Info("consul client down")
		}
	}

	// Best effort: a failed pid-file removal is logged but does not fail shutdown.
	pidErr := a.deletePid()
	if pidErr != nil {
		a.logger.Warn("could not delete pid file", "error", pidErr)
	}

	a.logger.Info("shutdown complete")
	a.shutdown = true
	close(a.shutdownCh)
	return err
}
// ShutdownEndpoints terminates the HTTP and DNS servers. Should be
// preceded by ShutdownAgent.
// TODO: remove this method, move to ShutdownAgent
func (a *Agent) ShutdownEndpoints() {
	a.shutdownLock.Lock()
	defer a.shutdownLock.Unlock()

	ctx := context.TODO()

	// Stop every DNS server that was actually started.
	for _, srv := range a.dnsServers {
		if srv.Server != nil {
			a.logger.Info("Stopping server",
				"protocol", "DNS",
				"address", srv.Server.Addr,
				"network", srv.Server.Net,
			)
			srv.Shutdown()
		}
	}
	a.dnsServers = nil

	// Initiate HTTP(S) API shutdown, then block until the servers report done.
	a.apiServers.Shutdown(ctx)
	a.logger.Info("Waiting for endpoints to shut down")
	if err := a.apiServers.WaitForShutdown(); err != nil {
		a.logger.Error(err.Error())
	}
	a.logger.Info("Endpoints down")
}
2017-06-02 09:55:29 +00:00
// RetryJoinCh is a channel that transports errors
// from the retry join process.
func (a *Agent) RetryJoinCh() <-chan error {
	ch := a.retryJoinCh
	return ch
}
2014-04-18 05:46:31 +00:00
// ShutdownCh is used to return a channel that can be
// selected to wait for the agent to perform a shutdown.
2013-12-21 00:39:32 +00:00
func ( a * Agent ) ShutdownCh ( ) <- chan struct { } {
return a . shutdownCh
2013-12-20 01:14:46 +00:00
}
2013-12-30 22:42:41 +00:00
// JoinLAN is used to have the agent join a LAN cluster
2022-04-05 21:10:06 +00:00
func ( a * Agent ) JoinLAN ( addrs [ ] string , entMeta * acl . EnterpriseMeta ) ( n int , err error ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(LAN) joining" , "lan_addresses" , addrs )
2021-10-26 20:08:55 +00:00
n , err = a . delegate . JoinLAN ( addrs , entMeta )
2019-05-24 14:50:18 +00:00
if err == nil {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(LAN) joined" , "number_of_nodes" , n )
2019-05-24 14:50:18 +00:00
if a . joinLANNotifier != nil {
if notifErr := a . joinLANNotifier . Notify ( systemd . Ready ) ; notifErr != nil {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "systemd notify failed" , "error" , notifErr )
2019-05-24 14:50:18 +00:00
}
2017-06-21 04:43:55 +00:00
}
2019-05-24 14:50:18 +00:00
} else {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "(LAN) couldn't join" ,
"number_of_nodes" , n ,
"error" , err ,
)
2017-06-21 04:43:55 +00:00
}
2013-12-30 22:42:41 +00:00
return
}
// JoinWAN is used to have the agent join a WAN cluster
func ( a * Agent ) JoinWAN ( addrs [ ] string ) ( n int , err error ) {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(WAN) joining" , "wan_addresses" , addrs )
2017-05-15 14:05:17 +00:00
if srv , ok := a . delegate . ( * consul . Server ) ; ok {
n , err = srv . JoinWAN ( addrs )
2013-12-30 22:42:41 +00:00
} else {
err = fmt . Errorf ( "Must be a server to join WAN cluster" )
}
2019-05-24 14:50:18 +00:00
if err == nil {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "(WAN) joined" , "number_of_nodes" , n )
2019-05-24 14:50:18 +00:00
} else {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "(WAN) couldn't join" ,
"number_of_nodes" , n ,
"error" , err ,
)
2019-05-24 14:50:18 +00:00
}
2013-12-30 22:42:41 +00:00
return
}
2020-03-09 20:59:02 +00:00
// PrimaryMeshGatewayAddressesReadyCh returns a channel that will be closed
// when federation state replication ships back at least one primary mesh
// gateway (not via fallback config).
func ( a * Agent ) PrimaryMeshGatewayAddressesReadyCh ( ) <- chan struct { } {
if srv , ok := a . delegate . ( * consul . Server ) ; ok {
return srv . PrimaryMeshGatewayAddressesReadyCh ( )
}
return nil
}
// PickRandomMeshGatewaySuitableForDialing is a convenience function used for
// writing tests. Returns the empty string on non-server agents.
func (a *Agent) PickRandomMeshGatewaySuitableForDialing(dc string) string {
	srv, ok := a.delegate.(*consul.Server)
	if !ok {
		return ""
	}
	return srv.PickRandomMeshGatewaySuitableForDialing(dc)
}
// RefreshPrimaryGatewayFallbackAddresses is used to update the list of current
// fallback addresses for locating mesh gateways in the primary datacenter.
// Errors on non-server agents.
func (a *Agent) RefreshPrimaryGatewayFallbackAddresses(addrs []string) error {
	srv, ok := a.delegate.(*consul.Server)
	if !ok {
		return fmt.Errorf("Must be a server to track mesh gateways in the primary datacenter")
	}
	srv.RefreshPrimaryGatewayFallbackAddresses(addrs)
	return nil
}
2013-12-30 22:42:41 +00:00
// ForceLeave is used to remove a failed node from the cluster
2022-04-05 21:10:06 +00:00
func ( a * Agent ) ForceLeave ( node string , prune bool , entMeta * acl . EnterpriseMeta ) error {
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Force leaving node" , "node" , node )
2021-12-02 23:15:10 +00:00
err := a . delegate . RemoveFailedNode ( node , prune , entMeta )
2013-12-30 22:42:41 +00:00
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Failed to remove node" ,
"node" , node ,
"error" , err ,
)
2013-12-30 22:42:41 +00:00
}
return err
}
2021-12-02 23:15:10 +00:00
// ForceLeaveWAN is used to remove a failed node from the WAN cluster
2022-04-05 21:10:06 +00:00
func ( a * Agent ) ForceLeaveWAN ( node string , prune bool , entMeta * acl . EnterpriseMeta ) error {
2021-12-02 23:15:10 +00:00
a . logger . Info ( "(WAN) Force leaving node" , "node" , node )
srv , ok := a . delegate . ( * consul . Server )
if ! ok {
return fmt . Errorf ( "Must be a server to force-leave a node from the WAN cluster" )
}
err := srv . RemoveFailedNodeWAN ( node , prune , entMeta )
if err != nil {
a . logger . Warn ( "(WAN) Failed to remove node" ,
"node" , node ,
"error" , err ,
)
}
return err
}
2021-10-26 20:08:55 +00:00
// AgentLocalMember is used to retrieve the LAN member for the local node.
func ( a * Agent ) AgentLocalMember ( ) serf . Member {
return a . delegate . AgentLocalMember ( )
2014-05-25 23:59:48 +00:00
}
2021-10-26 20:08:55 +00:00
// LANMembersInAgentPartition is used to retrieve the LAN members for this
// agent's partition.
func ( a * Agent ) LANMembersInAgentPartition ( ) [ ] serf . Member {
return a . delegate . LANMembersInAgentPartition ( )
2013-12-30 22:42:41 +00:00
}
2021-11-04 22:17:19 +00:00
// LANMembers returns the LAN members for one of:
//
// - the requested partition
// - the requested segment
// - all segments
//
// This is limited to segments and partitions that the node is a member of.
func ( a * Agent ) LANMembers ( f consul . LANMemberFilter ) ( [ ] serf . Member , error ) {
return a . delegate . LANMembers ( f )
}
2014-04-18 05:46:31 +00:00
// WANMembers is used to retrieve the WAN members
2013-12-30 22:42:41 +00:00
func ( a * Agent ) WANMembers ( ) [ ] serf . Member {
2017-05-15 14:05:17 +00:00
if srv , ok := a . delegate . ( * consul . Server ) ; ok {
return srv . WANMembers ( )
2013-12-30 22:42:41 +00:00
}
2017-04-21 01:59:42 +00:00
return nil
2013-12-30 22:42:41 +00:00
}
2014-01-21 19:52:25 +00:00
// StartSync is called once Services and Checks are registered.
// This is called to prevent a race between clients and the anti-entropy routines
func ( a * Agent ) StartSync ( ) {
2017-08-28 12:17:09 +00:00
go a . sync . Run ( )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "started state syncer" )
2014-01-21 19:52:25 +00:00
}
2014-01-30 21:39:02 +00:00
2018-09-27 14:00:51 +00:00
// PauseSync is used to pause anti-entropy while bulk changes are made. It also
// sets state that agent-local watches use to "ride out" config reloads and bulk
// updates which might spuriously unload state and reload it again.
2014-02-07 20:19:56 +00:00
func ( a * Agent ) PauseSync ( ) {
2018-09-27 14:00:51 +00:00
// Do this outside of lock as it has it's own locking
2017-08-28 12:17:09 +00:00
a . sync . Pause ( )
2018-09-27 14:00:51 +00:00
// Coordinate local state watchers
a . syncMu . Lock ( )
defer a . syncMu . Unlock ( )
if a . syncCh == nil {
a . syncCh = make ( chan struct { } )
}
2014-02-07 20:19:56 +00:00
}
2014-04-18 05:46:31 +00:00
// ResumeSync is used to unpause anti-entropy after bulk changes are make
2014-02-07 20:19:56 +00:00
func ( a * Agent ) ResumeSync ( ) {
2018-09-27 14:00:51 +00:00
// a.sync maintains a stack/ref count of Pause calls since we call
// Pause/Resume in nested way during a reload and AddService. We only want to
// trigger local state watchers if this Resume call actually started sync back
// up again (i.e. was the last resume on the stack). We could check that
// separately with a.sync.Paused but that is racey since another Pause call
// might be made between our Resume and checking Paused.
resumed := a . sync . Resume ( )
if ! resumed {
// Return early so we don't notify local watchers until we are actually
// resumed.
return
}
// Coordinate local state watchers
a . syncMu . Lock ( )
defer a . syncMu . Unlock ( )
if a . syncCh != nil {
close ( a . syncCh )
a . syncCh = nil
}
}
2020-01-27 19:54:32 +00:00
// SyncPausedCh returns either a channel or nil. If nil sync is not paused. If
2018-09-27 14:00:51 +00:00
// non-nil, the channel will be closed when sync resumes.
2019-09-26 02:55:52 +00:00
func ( a * Agent ) SyncPausedCh ( ) <- chan struct { } {
2018-09-27 14:00:51 +00:00
a . syncMu . Lock ( )
defer a . syncMu . Unlock ( )
return a . syncCh
2014-02-07 20:19:56 +00:00
}
2017-08-14 14:36:07 +00:00
// GetLANCoordinate returns the coordinates of this node in the local pools
// (assumes coordinates are enabled, so check that before calling).
func ( a * Agent ) GetLANCoordinate ( ) ( lib . CoordinateSet , error ) {
2017-05-15 14:05:17 +00:00
return a . delegate . GetLANCoordinate ( )
2015-10-16 02:28:31 +00:00
}
2015-06-06 03:31:33 +00:00
// sendCoordinate is a long-running loop that periodically sends our coordinate
// to the server. Closing the agent's shutdownChannel will cause this to exit.
func (a *Agent) sendCoordinate() {
OUTER:
	for {
		// Scale the update interval with cluster size and add jitter so the
		// servers are not hit by synchronized updates.
		rate := a.config.SyncCoordinateRateTarget
		min := a.config.SyncCoordinateIntervalMin
		intv := lib.RateScaledInterval(rate, min, len(a.LANMembersInAgentPartition()))
		intv = intv + lib.RandomStagger(intv)

		select {
		case <-time.After(intv):
			// Coordinate updates need protocol version 3+ on all servers;
			// skip this round otherwise.
			members := a.LANMembersInAgentPartition()
			grok, err := consul.CanServersUnderstandProtocol(members, 3)
			if err != nil {
				a.logger.Error("Failed to check servers", "error", err)
				continue
			}
			if !grok {
				a.logger.Debug("Skipping coordinate updates until servers are upgraded")
				continue
			}

			cs, err := a.GetLANCoordinate()
			if err != nil {
				a.logger.Error("Failed to get coordinate", "error", err)
				continue
			}

			// One update RPC per segment coordinate; abort the whole round on
			// the first RPC failure (continue OUTER).
			for segment, coord := range cs {
				agentToken := a.tokens.AgentToken()
				req := structs.CoordinateUpdateRequest{
					Datacenter:     a.config.Datacenter,
					Node:           a.config.NodeName,
					Segment:        segment,
					Coord:          coord,
					EnterpriseMeta: *a.AgentEnterpriseMeta(),
					WriteRequest:   structs.WriteRequest{Token: agentToken},
				}
				var reply struct{}
				// todo(kit) port all of these logger calls to hclog w/ loglevel configuration
				// todo(kit) handle acl.ErrNotFound cases here in the future
				if err := a.RPC("Coordinate.Update", &req, &reply); err != nil {
					if acl.IsErrPermissionDenied(err) {
						// Resolve the token to its accessor ID purely for the log line.
						accessorID := a.aclAccessorID(agentToken)
						a.logger.Warn("Coordinate update blocked by ACLs", "accessorID", accessorID)
					} else {
						a.logger.Error("Coordinate update error", "error", err)
					}
					continue OUTER
				}
			}
		case <-a.shutdownCh:
			return
		}
	}
}
2016-08-16 19:52:30 +00:00
// reapServicesInternal does a single pass, looking for services to reap.
func (a *Agent) reapServicesInternal() {
	// Track services already reaped this pass so multiple critical checks on
	// the same service trigger at most one removal.
	reaped := make(map[structs.ServiceID]bool)
	for checkID, cs := range a.State.AllCriticalCheckStates() {
		serviceID := cs.Check.CompoundServiceID()

		// There's nothing to do if there's no service.
		if serviceID.ID == "" {
			continue
		}

		// There might be multiple checks for one service, so
		// we don't need to reap multiple times.
		if reaped[serviceID] {
			continue
		}

		// See if there's a timeout.
		// todo(fs): this looks fishy... why is there another data structure in the agent with its own lock?
		a.stateLock.Lock()
		timeout := a.checkReapAfter[checkID]
		a.stateLock.Unlock()

		// Reap, if necessary. We keep track of which service
		// this is so that we won't try to remove it again.
		if timeout > 0 && cs.CriticalFor() > timeout {
			reaped[serviceID] = true
			if err := a.RemoveService(serviceID); err != nil {
				a.logger.Error("failed to deregister service with critical health that exceeded health check's 'deregister_critical_service_after' timeout",
					"service", serviceID.String(),
					"check", checkID.String(),
					"timeout", timeout.String(),
					"error", err,
				)
			} else {
				a.logger.Info("deregistered service with critical health due to exceeding health check's 'deregister_critical_service_after' timeout",
					"service", serviceID.String(),
					"check", checkID.String(),
					"timeout", timeout.String(),
				)
			}
		}
	}
}
2016-08-16 07:05:55 +00:00
2016-08-16 19:52:30 +00:00
// reapServices is a long running goroutine that looks for checks that have been
2017-10-26 02:17:41 +00:00
// critical too long and deregisters their associated services.
2016-08-16 19:52:30 +00:00
func ( a * Agent ) reapServices ( ) {
2016-08-16 07:05:55 +00:00
for {
select {
case <- time . After ( a . config . CheckReapInterval ) :
2016-08-16 19:52:30 +00:00
a . reapServicesInternal ( )
2016-08-16 07:05:55 +00:00
case <- a . shutdownCh :
return
}
}
}
2017-06-15 16:46:06 +00:00
// persistedService is used to wrap a service definition and bundle it
// with an ACL token so we can restore both at a later agent start.
type persistedService struct {
	// Token is the ACL token the service was registered with.
	Token   string
	Service *structs.NodeService
	// Source records the configSource the service came from (as a string).
	Source string
	// whether this service was registered as a sidecar, see structs.NodeService
	// we store this field here because it is excluded from json serialization
	// to exclude it from API output, but we need it to properly deregister
	// persisted sidecars.
	LocallyRegisteredAsSidecar bool `json:",omitempty"`
}
2021-11-04 20:07:54 +00:00
// makeServiceFilePath returns the on-disk path under the agent's data dir
// where the persisted definition for the given service ID lives. The file
// name is the SHA-256 hash of the compound service ID.
func (a *Agent) makeServiceFilePath(svcID structs.ServiceID) string {
	name := svcID.StringHashSHA256()
	return filepath.Join(a.config.DataDir, servicesDir, name)
}
2014-11-24 08:36:03 +00:00
// persistService saves a service definition to a JSON file in the data dir
2019-09-24 15:04:48 +00:00
func ( a * Agent ) persistService ( service * structs . NodeService , source configSource ) error {
2019-12-10 02:26:41 +00:00
svcID := service . CompoundServiceID ( )
2021-11-04 20:07:54 +00:00
svcPath := a . makeServiceFilePath ( svcID )
2016-11-07 18:51:03 +00:00
2015-05-06 05:08:03 +00:00
wrapped := persistedService {
2020-10-12 19:45:08 +00:00
Token : a . State . ServiceToken ( service . CompoundServiceID ( ) ) ,
Service : service ,
Source : source . String ( ) ,
LocallyRegisteredAsSidecar : service . LocallyRegisteredAsSidecar ,
2015-05-06 05:08:03 +00:00
}
encoded , err := json . Marshal ( wrapped )
if err != nil {
2016-04-26 22:03:26 +00:00
return err
2015-05-06 05:08:03 +00:00
}
2016-11-07 18:51:03 +00:00
2018-05-03 20:56:42 +00:00
return file . WriteAtomic ( svcPath , encoded )
2014-11-24 08:36:03 +00:00
}
// purgeService removes a persisted service definition file from the data dir
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeService ( serviceID structs . ServiceID ) error {
2021-11-04 20:07:54 +00:00
svcPath := a . makeServiceFilePath ( serviceID )
2014-11-24 08:36:03 +00:00
if _ , err := os . Stat ( svcPath ) ; err == nil {
return os . Remove ( svcPath )
}
return nil
}
// persistCheck saves a check definition to the local agent's state directory
2019-09-24 15:04:48 +00:00
func ( a * Agent ) persistCheck ( check * structs . HealthCheck , chkType * structs . CheckType , source configSource ) error {
2019-12-10 02:26:41 +00:00
cid := check . CompoundCheckID ( )
2021-11-04 20:07:54 +00:00
checkPath := filepath . Join ( a . config . DataDir , checksDir , cid . StringHashSHA256 ( ) )
2014-11-29 20:25:01 +00:00
// Create the persisted check
2015-04-28 19:44:46 +00:00
wrapped := persistedCheck {
Check : check ,
ChkType : chkType ,
2019-12-10 02:26:41 +00:00
Token : a . State . CheckToken ( check . CompoundCheckID ( ) ) ,
2019-09-24 15:04:48 +00:00
Source : source . String ( ) ,
2015-04-28 19:44:46 +00:00
}
2014-11-29 20:25:01 +00:00
2015-04-28 19:44:46 +00:00
encoded , err := json . Marshal ( wrapped )
2014-11-29 20:25:01 +00:00
if err != nil {
2016-04-26 22:03:26 +00:00
return err
2014-11-29 20:25:01 +00:00
}
2016-11-07 18:51:03 +00:00
2018-05-03 20:56:42 +00:00
return file . WriteAtomic ( checkPath , encoded )
2014-11-24 08:36:03 +00:00
}
// purgeCheck removes a persisted check definition file from the data dir
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeCheck ( checkID structs . CheckID ) error {
2021-11-04 20:07:54 +00:00
checkPath := filepath . Join ( a . config . DataDir , checksDir , checkID . StringHashSHA256 ( ) )
2014-11-24 08:36:03 +00:00
if _ , err := os . Stat ( checkPath ) ; err == nil {
return os . Remove ( checkPath )
}
return nil
}
2019-09-24 15:04:48 +00:00
// persistedServiceConfig is used to serialize the resolved service config that
// feeds into the ServiceManager at registration time so that it may be
// restored later on.
type persistedServiceConfig struct {
	// ServiceID is the plain (non-compound) service ID; the embedded
	// EnterpriseMeta below carries the rest of what is needed to rebuild the
	// compound ID on load.
	ServiceID string
	// Defaults is the centralized service config resolved for this service.
	Defaults *structs.ServiceConfigResponse

	acl.EnterpriseMeta
}
2021-11-04 20:07:54 +00:00
// makeServiceConfigFilePath returns the on-disk path under the agent's data
// dir where the persisted resolved config for the given service ID lives.
// The file name is the SHA-256 hash of the compound service ID.
func (a *Agent) makeServiceConfigFilePath(serviceID structs.ServiceID) string {
	name := serviceID.StringHashSHA256()
	return filepath.Join(a.config.DataDir, serviceConfigDir, name)
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) persistServiceConfig ( serviceID structs . ServiceID , defaults * structs . ServiceConfigResponse ) error {
2019-09-24 15:04:48 +00:00
// Create the persisted config.
wrapped := persistedServiceConfig {
2019-12-10 02:26:41 +00:00
ServiceID : serviceID . ID ,
Defaults : defaults ,
EnterpriseMeta : serviceID . EnterpriseMeta ,
2019-09-24 15:04:48 +00:00
}
encoded , err := json . Marshal ( wrapped )
if err != nil {
return err
}
dir := filepath . Join ( a . config . DataDir , serviceConfigDir )
2021-11-04 20:07:54 +00:00
configPath := a . makeServiceConfigFilePath ( serviceID )
2019-09-24 15:04:48 +00:00
// Create the config dir if it doesn't exist
if err := os . MkdirAll ( dir , 0700 ) ; err != nil {
return fmt . Errorf ( "failed creating service configs dir %q: %s" , dir , err )
}
return file . WriteAtomic ( configPath , encoded )
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeServiceConfig ( serviceID structs . ServiceID ) error {
2021-11-04 20:07:54 +00:00
configPath := a . makeServiceConfigFilePath ( serviceID )
2019-09-24 15:04:48 +00:00
if _ , err := os . Stat ( configPath ) ; err == nil {
return os . Remove ( configPath )
}
return nil
}
2019-12-10 02:26:41 +00:00
// readPersistedServiceConfigs loads every persisted service config file from
// the data dir and returns the resolved defaults keyed by compound service
// ID. Temporary and undecodable files are skipped (with a log message);
// files named with the legacy hash are renamed to the sha256-based name; and
// configs belonging to a different partition than this agent are purged.
func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.ServiceConfigResponse, error) {
	out := make(map[structs.ServiceID]*structs.ServiceConfigResponse)

	configDir := filepath.Join(a.config.DataDir, serviceConfigDir)
	files, err := ioutil.ReadDir(configDir)
	if err != nil {
		// A missing directory just means nothing has been persisted yet.
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("Failed reading service configs dir %q: %s", configDir, err)
	}

	for _, fi := range files {
		// Skip all dirs
		if fi.IsDir() {
			continue
		}

		// Skip all partially written temporary files
		if strings.HasSuffix(fi.Name(), "tmp") {
			a.logger.Warn("Ignoring temporary service config file", "file", fi.Name())
			continue
		}

		// Read the contents into a buffer
		file := filepath.Join(configDir, fi.Name())
		buf, err := ioutil.ReadFile(file)
		if err != nil {
			return nil, fmt.Errorf("failed reading service config file %q: %w", file, err)
		}

		// Try decoding the service config definition
		var p persistedServiceConfig
		if err := json.Unmarshal(buf, &p); err != nil {
			// Decode failures are logged and skipped rather than aborting the
			// whole load, so one corrupt file doesn't block agent startup.
			a.logger.Error("Failed decoding service config file",
				"file", file,
				"error", err,
			)
			continue
		}
		serviceID := structs.NewServiceID(p.ServiceID, &p.EnterpriseMeta)

		// Rename files that used the old md5 hash to the new sha256 name; only needed when upgrading from 1.10 and before.
		newPath := a.makeServiceConfigFilePath(serviceID)
		if file != newPath {
			// Rename failures are logged but non-fatal; the config is still
			// usable from the old path for this run.
			if err := os.Rename(file, newPath); err != nil {
				a.logger.Error("Failed renaming service config file",
					"file", file,
					"targetFile", newPath,
					"error", err,
				)
			}
		}

		// Drop (and delete) configs that belong to a different partition
		// than the one this agent is in.
		if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.PartitionOrDefault()) {
			a.logger.Info("Purging service config file in wrong partition",
				"file", file,
				"partition", p.PartitionOrDefault(),
			)
			if err := os.Remove(file); err != nil {
				a.logger.Error("Failed purging service config file",
					"file", file,
					"error", err,
				)
			}
			continue
		}

		out[serviceID] = p.Defaults
	}

	return out, nil
}
2020-11-30 17:53:46 +00:00
// AddService is used to add a service entry and its check. Any check for this service missing from chkTypes will be deleted.
2019-09-02 15:38:29 +00:00
// This entry is persistent and the agent will make a best effort to
// ensure it is registered
2020-11-30 18:26:58 +00:00
func ( a * Agent ) AddService ( req AddServiceRequest ) error {
2020-11-30 22:01:37 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
2020-11-30 20:39:06 +00:00
rl := addServiceLockedRequest {
AddServiceRequest : req ,
serviceDefaults : serviceDefaultsFromCache ( a . baseDeps , req ) ,
persistServiceConfig : true ,
}
return a . addServiceLocked ( rl )
2019-09-02 15:38:29 +00:00
}
2019-04-24 13:11:08 +00:00
// addServiceLocked adds a service entry to the service manager if enabled, or directly
// to the local state if it is not. This function assumes the state lock is already held.
2020-11-30 20:39:06 +00:00
func ( a * Agent ) addServiceLocked ( req addServiceLockedRequest ) error {
2020-11-30 18:26:58 +00:00
req . Service . EnterpriseMeta . Normalize ( )
2019-12-10 02:26:41 +00:00
2020-11-30 18:26:58 +00:00
if err := a . validateService ( req . Service , req . chkTypes ) ; err != nil {
2019-04-23 06:39:02 +00:00
return err
2019-01-08 10:13:49 +00:00
}
2020-11-30 19:24:08 +00:00
if a . config . EnableCentralServiceConfig && ( req . Service . IsSidecarProxy ( ) || req . Service . IsGateway ( ) ) {
2019-09-24 15:04:48 +00:00
return a . serviceManager . AddService ( req )
2015-02-09 17:22:51 +00:00
}
2019-09-24 15:04:48 +00:00
req . persistServiceConfig = false
2020-11-30 20:39:06 +00:00
return a . addServiceInternal ( addServiceInternalRequest { addServiceLockedRequest : req } )
}
// addServiceLockedRequest is the request type for addServiceLocked. It wraps
// AddServiceRequest with fields that only apply once Agent.stateLock is held.
type addServiceLockedRequest struct {
	AddServiceRequest

	// persistServiceConfig indicates whether the resolved central service
	// config should be written to (or purged from) disk by
	// addServiceInternal.
	persistServiceConfig bool

	// serviceDefaults is a function which will return centralized service
	// configuration.
	// When loading service definitions from disk this will return a copy
	// loaded from a persisted file. Otherwise it will query a Server for the
	// centralized config.
	// serviceDefaults is called when the Agent.stateLock is held, so it must
	// never attempt to acquire that lock.
	serviceDefaults func(context.Context) (*structs.ServiceConfigResponse, error)

	// checkStateSnapshot may optionally be set to a snapshot of the checks in
	// the local.State. If checkStateSnapshot is nil, addServiceInternal will
	// call State.Checks to get the snapshot.
	checkStateSnapshot map[structs.CheckID]*structs.HealthCheck
}
2020-11-30 22:07:36 +00:00
// AddServiceRequest contains the fields used to register a service on the local
// agent using Agent.AddService.
type AddServiceRequest struct {
	// Service is the service definition to register.
	Service *structs.NodeService
	// chkTypes are the definitions of the health checks to register with the
	// service.
	chkTypes []*structs.CheckType
	// persist indicates the service and its checks should be saved to disk
	// (when the agent has a data dir) so they survive a restart.
	persist bool
	// token is the ACL token used for the registration.
	token string
	// replaceExistingChecks, when set, deletes any existing checks for this
	// service that are not present in chkTypes.
	replaceExistingChecks bool
	// Source records where the registration came from (local config, remote
	// API, etc.).
	Source configSource
}
2020-11-30 18:46:14 +00:00
// addServiceInternalRequest is the request type for addServiceInternal. It
// extends addServiceLockedRequest with fields that control how the service
// definition is persisted.
type addServiceInternalRequest struct {
	addServiceLockedRequest

	// persistService may be set to a NodeService definition to indicate to
	// addServiceInternal that if persist=true, it should persist this definition
	// of the service, not the one from the Service field. This is necessary so
	// that the service is persisted without the serviceDefaults.
	persistService *structs.NodeService

	// persistServiceDefaults may be set to a ServiceConfigResponse to indicate to
	// addServiceInternal that it should persist the value in a file.
	persistServiceDefaults *structs.ServiceConfigResponse
}
2015-02-09 17:30:06 +00:00
2019-04-24 13:11:08 +00:00
// addServiceInternal adds the given service and checks to the local state.
2020-11-30 18:46:14 +00:00
func ( a * Agent ) addServiceInternal ( req addServiceInternalRequest ) error {
2020-11-30 19:08:26 +00:00
service := req . Service
2019-09-24 15:04:48 +00:00
2015-05-06 19:28:42 +00:00
// Pause the service syncs during modification
a . PauseSync ( )
defer a . ResumeSync ( )
2020-01-17 14:54:17 +00:00
// Set default tagged addresses
serviceIP := net . ParseIP ( service . Address )
serviceAddressIs4 := serviceIP != nil && serviceIP . To4 ( ) != nil
serviceAddressIs6 := serviceIP != nil && serviceIP . To4 ( ) == nil
if service . TaggedAddresses == nil {
service . TaggedAddresses = map [ string ] structs . ServiceAddress { }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv4 ] ; ! ok && serviceAddressIs4 {
service . TaggedAddresses [ structs . TaggedAddressLANIPv4 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressWANIPv4 ] ; ! ok && serviceAddressIs4 {
service . TaggedAddresses [ structs . TaggedAddressWANIPv4 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] ; ! ok && serviceAddressIs6 {
service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
if _ , ok := service . TaggedAddresses [ structs . TaggedAddressWANIPv6 ] ; ! ok && serviceAddressIs6 {
service . TaggedAddresses [ structs . TaggedAddressWANIPv6 ] = structs . ServiceAddress { Address : service . Address , Port : service . Port }
}
2019-03-04 14:34:05 +00:00
var checks [ ] * structs . HealthCheck
2014-11-24 08:36:03 +00:00
2019-12-10 02:26:41 +00:00
// all the checks must be associated with the same enterprise meta of the service
// so this map can just use the main CheckID for indexing
existingChecks := map [ structs . CheckID ] bool { }
for _ , check := range a . State . ChecksForService ( service . CompoundServiceID ( ) , false ) {
existingChecks [ check . CompoundCheckID ( ) ] = false
2019-09-02 15:38:29 +00:00
}
2020-11-30 22:01:37 +00:00
// Note, this is explicitly a nil check instead of len() == 0 because
// Agent.Start does not have a snapshot, and we don't want to query
// State.Checks each time.
if req . checkStateSnapshot == nil {
2021-08-19 20:09:42 +00:00
req . checkStateSnapshot = a . State . AllChecks ( )
2020-11-30 22:01:37 +00:00
}
2014-01-30 21:39:02 +00:00
// Create an associated health check
2020-11-30 19:08:26 +00:00
for i , chkType := range req . chkTypes {
2017-05-15 19:49:13 +00:00
checkID := string ( chkType . CheckID )
if checkID == "" {
checkID = fmt . Sprintf ( "service:%s" , service . ID )
2020-11-30 19:08:26 +00:00
if len ( req . chkTypes ) > 1 {
2017-05-15 19:49:13 +00:00
checkID += fmt . Sprintf ( ":%d" , i + 1 )
}
}
2019-11-14 15:59:06 +00:00
2020-04-15 16:03:29 +00:00
cid := structs . NewCheckID ( types . CheckID ( checkID ) , & service . EnterpriseMeta )
2019-12-10 02:26:41 +00:00
existingChecks [ cid ] = true
2019-11-14 15:59:06 +00:00
2017-05-15 19:49:13 +00:00
name := chkType . Name
if name == "" {
name = fmt . Sprintf ( "Service '%s' check" , service . Service )
2015-01-14 01:52:17 +00:00
}
2022-02-18 20:05:33 +00:00
var intervalStr string
var timeoutStr string
if chkType . Interval != 0 {
intervalStr = chkType . Interval . String ( )
}
if chkType . Timeout != 0 {
timeoutStr = chkType . Interval . String ( )
}
2014-01-30 21:39:02 +00:00
check := & structs . HealthCheck {
2019-12-10 02:26:41 +00:00
Node : a . config . NodeName ,
CheckID : types . CheckID ( checkID ) ,
Name : name ,
2022-02-18 20:05:33 +00:00
Interval : intervalStr ,
Timeout : timeoutStr ,
2019-12-10 02:26:41 +00:00
Status : api . HealthCritical ,
Notes : chkType . Notes ,
ServiceID : service . ID ,
ServiceName : service . Service ,
ServiceTags : service . Tags ,
Type : chkType . Type ( ) ,
EnterpriseMeta : service . EnterpriseMeta ,
2014-01-30 21:39:02 +00:00
}
2015-04-12 00:53:48 +00:00
if chkType . Status != "" {
check . Status = chkType . Status
}
2019-03-04 14:34:05 +00:00
2019-07-17 19:06:50 +00:00
// Restore the fields from the snapshot.
2020-11-30 22:01:37 +00:00
prev , ok := req . checkStateSnapshot [ cid ]
2019-07-17 19:06:50 +00:00
if ok {
check . Output = prev . Output
check . Status = prev . Status
}
2019-03-04 14:34:05 +00:00
checks = append ( checks , check )
}
// cleanup, store the ids of services and checks that weren't previously
2019-09-26 02:55:52 +00:00
// registered so we clean them up if something fails halfway through the
2019-03-04 14:34:05 +00:00
// process.
2019-12-10 02:26:41 +00:00
var cleanupServices [ ] structs . ServiceID
var cleanupChecks [ ] structs . CheckID
2019-03-04 14:34:05 +00:00
2019-12-10 02:26:41 +00:00
sid := service . CompoundServiceID ( )
if s := a . State . Service ( sid ) ; s == nil {
cleanupServices = append ( cleanupServices , sid )
2019-03-04 14:34:05 +00:00
}
for _ , check := range checks {
2019-12-10 02:26:41 +00:00
cid := check . CompoundCheckID ( )
if c := a . State . Check ( cid ) ; c == nil {
cleanupChecks = append ( cleanupChecks , cid )
2019-03-04 14:34:05 +00:00
}
}
2020-11-30 19:08:26 +00:00
err := a . State . AddServiceWithChecks ( service , checks , req . token )
2019-03-04 14:34:05 +00:00
if err != nil {
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
2020-11-30 19:08:26 +00:00
source := req . Source
persist := req . persist
2019-03-04 14:34:05 +00:00
for i := range checks {
2020-11-30 19:08:26 +00:00
if err := a . addCheck ( checks [ i ] , req . chkTypes [ i ] , service , req . token , source ) ; err != nil {
2019-03-04 14:34:05 +00:00
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
if persist && a . config . DataDir != "" {
2020-11-30 19:08:26 +00:00
if err := a . persistCheck ( checks [ i ] , req . chkTypes [ i ] , source ) ; err != nil {
2019-03-04 14:34:05 +00:00
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
}
}
2019-09-26 02:55:52 +00:00
// If a proxy service wishes to expose checks, check targets need to be rerouted to the proxy listener
// This needs to be called after chkTypes are added to the agent, to avoid being overwritten
2020-04-15 16:03:29 +00:00
psid := structs . NewServiceID ( service . Proxy . DestinationServiceID , & service . EnterpriseMeta )
2019-12-10 02:26:41 +00:00
2019-09-26 02:55:52 +00:00
if service . Proxy . Expose . Checks {
agent: rewrite checks with proxy address, not local service address (#7518)
Exposing checks is supposed to allow a Consul agent bound to a different
IP address (e.g., in a different Kubernetes pod) to access healthchecks
through the proxy while the underlying service binds to localhost. This
is an important security feature that makes sure no external traffic
reaches the service except through the proxy.
However, as far as I can tell, this is subtly broken in the case where
the Consul agent cannot reach the proxy over localhost.
If a proxy is configured with: `{ LocalServiceAddress: "127.0.0.1",
Checks: true }`, as is typical with a sidecar proxy, the Consul checks
are currently rewritten to `127.0.0.1:<random port>`. A Consul agent
that does not share the loopback address cannot reach this address. Just
to make sure I was not misunderstanding, I tried configuring the proxy
with `{ LocalServiceAddress: "<pod ip>", Checks: true }`. In this case,
while the checks are rewritten as expected and the agent can reach the
dynamic port, the proxy can no longer reach its backend because the
traffic is no longer on the loopback interface.
I think rewriting the checks to use `proxy.Address`, the proxy's own
address, is more correct in this case. That is the IP where the proxy
can be reached, both by other proxies and by a Consul agent running on
a different IP. The local service address should continue to use
`127.0.0.1` in most cases.
2020-04-02 07:35:43 +00:00
err := a . rerouteExposedChecks ( psid , service . Address )
2019-09-26 02:55:52 +00:00
if err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "failed to reroute L7 checks to exposed proxy listener" )
2019-09-26 02:55:52 +00:00
}
} else {
// Reset check targets if proxy was re-registered but no longer wants to expose checks
// If the proxy is being registered for the first time then this is a no-op
2019-12-10 02:26:41 +00:00
a . resetExposedChecks ( psid )
2019-09-26 02:55:52 +00:00
}
2020-11-30 19:08:26 +00:00
if req . persistServiceConfig && a . config . DataDir != "" {
2019-09-24 15:04:48 +00:00
var err error
2020-11-30 22:07:36 +00:00
if req . persistServiceDefaults != nil {
err = a . persistServiceConfig ( service . CompoundServiceID ( ) , req . persistServiceDefaults )
2019-09-24 15:04:48 +00:00
} else {
2019-12-10 02:26:41 +00:00
err = a . purgeServiceConfig ( service . CompoundServiceID ( ) )
2019-09-24 15:04:48 +00:00
}
if err != nil {
a . cleanupRegistration ( cleanupServices , cleanupChecks )
return err
}
}
2019-03-04 14:34:05 +00:00
// Persist the service to a file
if persist && a . config . DataDir != "" {
2020-11-30 19:08:26 +00:00
if req . persistService == nil {
req . persistService = service
2019-09-24 15:04:48 +00:00
}
2020-11-30 19:08:26 +00:00
if err := a . persistService ( req . persistService , source ) ; err != nil {
2019-03-04 14:34:05 +00:00
a . cleanupRegistration ( cleanupServices , cleanupChecks )
2014-01-30 21:39:02 +00:00
return err
}
}
2018-09-27 13:33:12 +00:00
2020-11-30 19:08:26 +00:00
if req . replaceExistingChecks {
2019-09-02 15:38:29 +00:00
for checkID , keep := range existingChecks {
if ! keep {
a . removeCheckLocked ( checkID , persist )
}
}
}
2014-01-30 21:39:02 +00:00
return nil
}
2019-04-23 06:39:02 +00:00
// validateService validates an service and its checks, either returning an error or emitting a
// warning based on the nature of the error.
func ( a * Agent ) validateService ( service * structs . NodeService , chkTypes [ ] * structs . CheckType ) error {
if service . Service == "" {
return fmt . Errorf ( "Service name missing" )
}
if service . ID == "" && service . Service != "" {
service . ID = service . Service
}
for _ , check := range chkTypes {
if err := check . Validate ( ) ; err != nil {
return fmt . Errorf ( "Check is not valid: %v" , err )
}
}
// Set default weights if not specified. This is important as it ensures AE
// doesn't consider the service different since it has nil weights.
if service . Weights == nil {
service . Weights = & structs . Weights { Passing : 1 , Warning : 1 }
}
// Warn if the service name is incompatible with DNS
2020-08-17 21:24:49 +00:00
if dns . InvalidNameRe . MatchString ( service . Service ) {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Service name will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to invalid characters. Valid characters include " +
2020-01-28 23:50:41 +00:00
"all alpha-numerics and dashes." ,
"service" , service . Service ,
)
2020-08-17 21:24:49 +00:00
} else if len ( service . Service ) > dns . MaxLabelLength {
2020-01-28 23:50:41 +00:00
a . logger . Warn ( "Service name will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to it being too long. Valid lengths are between " +
2020-01-28 23:50:41 +00:00
"1 and 63 bytes." ,
"service" , service . Service ,
)
2019-04-23 06:39:02 +00:00
}
// Warn if any tags are incompatible with DNS
for _ , tag := range service . Tags {
2020-08-17 21:24:49 +00:00
if dns . InvalidNameRe . MatchString ( tag ) {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Service tag will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to invalid characters. Valid characters include " +
2020-01-28 23:50:41 +00:00
"all alpha-numerics and dashes." ,
"tag" , tag ,
)
2020-08-17 21:24:49 +00:00
} else if len ( tag ) > dns . MaxLabelLength {
2020-01-28 23:50:41 +00:00
a . logger . Debug ( "Service tag will not be discoverable " +
2019-04-23 06:39:02 +00:00
"via DNS due to it being too long. Valid lengths are between " +
2020-01-28 23:50:41 +00:00
"1 and 63 bytes." ,
"tag" , tag ,
)
2019-04-23 06:39:02 +00:00
}
}
2020-01-17 14:54:17 +00:00
// Check IPv4/IPv6 tagged addresses
if service . TaggedAddresses != nil {
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv4 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) == nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv4 address" , structs . TaggedAddressLANIPv4 )
}
}
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressWANIPv4 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) == nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv4 address" , structs . TaggedAddressWANIPv4 )
}
}
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) != nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv6 address" , structs . TaggedAddressLANIPv6 )
}
}
if sa , ok := service . TaggedAddresses [ structs . TaggedAddressLANIPv6 ] ; ok {
ip := net . ParseIP ( sa . Address )
if ip == nil || ip . To4 ( ) != nil {
return fmt . Errorf ( "Service tagged address %q must be a valid ipv6 address" , structs . TaggedAddressLANIPv6 )
}
}
}
2019-04-23 06:39:02 +00:00
return nil
}
2019-03-04 14:34:05 +00:00
// cleanupRegistration is called on registration error to ensure no there are no
// leftovers after a partial failure
2019-12-10 02:26:41 +00:00
func ( a * Agent ) cleanupRegistration ( serviceIDs [ ] structs . ServiceID , checksIDs [ ] structs . CheckID ) {
2019-03-04 14:34:05 +00:00
for _ , s := range serviceIDs {
if err := a . State . RemoveService ( s ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to remove service during cleanup" ,
"service" , s . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
if err := a . purgeService ( s ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to purge service file during cleanup" ,
"service" , s . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
2019-09-24 15:04:48 +00:00
if err := a . purgeServiceConfig ( s ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to purge service config file during cleanup" ,
"service" , s ,
"error" , err ,
)
2019-09-24 15:04:48 +00:00
}
2020-01-20 13:01:40 +00:00
if err := a . removeServiceSidecars ( s , true ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "service registration: cleanup: failed remove sidecars for" , "service" , s , "error" , err )
2020-01-20 13:01:40 +00:00
}
2019-03-04 14:34:05 +00:00
}
for _ , c := range checksIDs {
a . cancelCheckMonitors ( c )
if err := a . State . RemoveCheck ( c ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to remove check during cleanup" ,
"check" , c . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
if err := a . purgeCheck ( c ) ; err != nil {
2020-01-28 23:50:41 +00:00
a . logger . Error ( "failed to purge check file during cleanup" ,
"check" , c . String ( ) ,
"error" , err ,
)
2019-03-04 14:34:05 +00:00
}
}
}
2014-01-30 21:39:02 +00:00
// RemoveService is used to remove a service entry.
// The agent will make a best effort to ensure it is deregistered
2019-12-10 02:26:41 +00:00
func ( a * Agent ) RemoveService ( serviceID structs . ServiceID ) error {
2019-09-24 15:04:48 +00:00
return a . removeService ( serviceID , true )
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) removeService ( serviceID structs . ServiceID , persist bool ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
return a . removeServiceLocked ( serviceID , persist )
}
// removeServiceLocked is used to remove a service entry.
// The agent will make a best effort to ensure it is deregistered.
// Assumes Agent.stateLock is held. Removal order matters: the service
// manager watch is stopped first, exposed-check reroutes are reset, the
// service and its checks are removed from local state, persisted files are
// purged, and finally any sidecar registered for this service is removed.
func (a *Agent) removeServiceLocked(serviceID structs.ServiceID, persist bool) error {
	// Validate ServiceID
	if serviceID.ID == "" {
		return fmt.Errorf("ServiceID missing")
	}

	// Shut down the config watch in the service manager if enabled.
	if a.config.EnableCentralServiceConfig {
		a.serviceManager.RemoveService(serviceID)
	}

	// Reset the HTTP check targets if they were exposed through a proxy
	// If this is not a proxy or checks were not exposed then this is a no-op
	svc := a.State.Service(serviceID)

	if svc != nil {
		psid := structs.NewServiceID(svc.Proxy.DestinationServiceID, &svc.EnterpriseMeta)
		a.resetExposedChecks(psid)
	}

	checks := a.State.ChecksForService(serviceID, false)
	var checkIDs []structs.CheckID
	for id := range checks {
		checkIDs = append(checkIDs, id)
	}

	// Remove service immediately
	if err := a.State.RemoveServiceWithChecks(serviceID, checkIDs); err != nil {
		// NOTE(review): a failure here is logged but deliberately swallowed —
		// the caller always sees nil in this path.
		a.logger.Warn("Failed to deregister service",
			"service", serviceID.String(),
			"error", err,
		)
		return nil
	}

	// Remove the service from the data dir
	if persist {
		if err := a.purgeService(serviceID); err != nil {
			return err
		}
		if err := a.purgeServiceConfig(serviceID); err != nil {
			return err
		}
	}

	// Deregister any associated health checks
	for checkID := range checks {
		if err := a.removeCheckLocked(checkID, persist); err != nil {
			return err
		}
	}

	a.logger.Debug("removed service", "service", serviceID.String())

	// If any Sidecar services exist for the removed service ID, remove them too.
	return a.removeServiceSidecars(serviceID, persist)
}
func ( a * Agent ) removeServiceSidecars ( serviceID structs . ServiceID , persist bool ) error {
2020-11-30 18:14:15 +00:00
sidecarSID := structs . NewServiceID ( sidecarServiceID ( serviceID . ID ) , & serviceID . EnterpriseMeta )
2019-12-10 02:26:41 +00:00
if sidecar := a . State . Service ( sidecarSID ) ; sidecar != nil {
2018-09-27 13:33:12 +00:00
// Double check that it's not just an ID collision and we actually added
// this from a sidecar.
if sidecar . LocallyRegisteredAsSidecar {
// Remove it!
2019-12-10 02:26:41 +00:00
err := a . removeServiceLocked ( sidecarSID , persist )
2018-09-27 13:33:12 +00:00
if err != nil {
return err
}
}
}
2015-01-08 06:26:40 +00:00
return nil
2014-01-30 21:39:02 +00:00
}
// AddCheck is used to add a health check to the agent.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered. The Check may include a CheckType which
// is used to automatically update the check status
2018-10-11 12:22:11 +00:00
func ( a * Agent ) AddCheck ( check * structs . HealthCheck , chkType * structs . CheckType , persist bool , token string , source configSource ) error {
2019-03-04 14:34:05 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
return a . addCheckLocked ( check , chkType , persist , token , source )
}
// addCheckLocked registers a health check while Agent.stateLock is held. If
// the check references a service, that service must already exist in local
// state. On a monitor-start failure the check is removed from local state
// again; on success the check is added for anti-entropy and optionally
// persisted to disk.
func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.CheckType, persist bool, token string, source configSource) error {
	var service *structs.NodeService

	check.EnterpriseMeta.Normalize()

	if check.ServiceID != "" {
		cid := check.CompoundServiceID()
		service = a.State.Service(cid)
		if service == nil {
			return fmt.Errorf("ServiceID %q does not exist", cid.String())
		}
	}

	// Extra validations
	if err := check.Validate(); err != nil {
		return err
	}

	// snapshot the current state of the health check to avoid potential flapping
	cid := check.CompoundCheckID()
	existing := a.State.Check(cid)
	defer func() {
		// Restore the previous status/output after (re-)registration so a
		// re-registered check does not flap back to critical.
		if existing != nil {
			a.State.UpdateCheck(cid, existing.Status, existing.Output)
		}
	}()

	err := a.addCheck(check, chkType, service, token, source)
	if err != nil {
		a.State.RemoveCheck(cid)
		return err
	}

	// Add to the local state for anti-entropy
	err = a.State.AddCheck(check, token)
	if err != nil {
		return err
	}

	// Persist the check
	if persist && a.config.DataDir != "" {
		return a.persistCheck(check, chkType, source)
	}

	return nil
}
2020-06-23 17:18:22 +00:00
// addCheck creates and starts the concrete check runner (TTL, HTTP, TCP,
// UDP, gRPC, Docker, monitor/script, H2PING, or alias) for the given check
// definition, replacing any runner already registered under the same ID.
// The caller must hold a.stateLock.
func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, service *structs.NodeService, token string, source configSource) error {
	if check.CheckID == "" {
		return fmt.Errorf("CheckID missing")
	}

	if chkType != nil {
		if err := chkType.Validate(); err != nil {
			return fmt.Errorf("Check is not valid: %v", err)
		}

		// Script checks must be explicitly enabled; the required setting
		// depends on whether the registration came from local config or a
		// remote API call.
		if chkType.IsScript() {
			if source == ConfigSourceLocal && !a.config.EnableLocalScriptChecks {
				return fmt.Errorf("Scripts are disabled on this agent; to enable, configure 'enable_script_checks' or 'enable_local_script_checks' to true")
			}

			if source == ConfigSourceRemote && !a.config.EnableRemoteScriptChecks {
				return fmt.Errorf("Scripts are disabled on this agent from remote calls; to enable, configure 'enable_script_checks' to true")
			}
		}
	}

	if check.ServiceID != "" {
		// Inherit service identity onto the check.
		check.ServiceName = service.Service
		check.ServiceTags = service.Tags
		check.EnterpriseMeta = service.EnterpriseMeta
	}

	// Check if already registered
	if chkType != nil {
		// Effective output cap: agent-wide default, optionally tightened by
		// the check's own OutputMaxSize.
		maxOutputSize := a.config.CheckOutputMaxSize
		if maxOutputSize == 0 {
			maxOutputSize = checks.DefaultBufSize
		}
		if chkType.OutputMaxSize > 0 && maxOutputSize > chkType.OutputMaxSize {
			maxOutputSize = chkType.OutputMaxSize
		}

		// FailuresBeforeWarning has to default to same value as FailuresBeforeCritical
		if chkType.FailuresBeforeWarning == 0 {
			chkType.FailuresBeforeWarning = chkType.FailuresBeforeCritical
		}

		// Get the address of the proxy for this service if it exists
		// Need its config to know whether we should reroute checks to it
		var proxy *structs.NodeService
		if service != nil {
			// NOTE: Both services must live in the same namespace and
			// partition so this will correctly scope the results.
			for _, svc := range a.State.Services(&service.EnterpriseMeta) {
				if svc.Proxy.DestinationServiceID == service.ID {
					proxy = svc
					break
				}
			}
		}

		statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeWarning, chkType.FailuresBeforeCritical)

		sid := check.CompoundServiceID()
		cid := check.CompoundCheckID()

		switch {
		case chkType.IsTTL():
			// Stop and replace any previously registered runner for this ID.
			if existing, ok := a.checkTTLs[cid]; ok {
				existing.Stop()
				delete(a.checkTTLs, cid)
			}

			ttl := &checks.CheckTTL{
				Notify:        a.State,
				CheckID:       cid,
				ServiceID:     sid,
				TTL:           chkType.TTL,
				Logger:        a.logger,
				OutputMaxSize: maxOutputSize,
			}

			// Restore persisted state, if any
			if err := a.loadCheckState(check); err != nil {
				a.logger.Warn("failed restoring state for check",
					"check", cid.String(),
					"error", err,
				)
			}

			ttl.Start()
			a.checkTTLs[cid] = ttl

		case chkType.IsHTTP():
			if existing, ok := a.checkHTTPs[cid]; ok {
				existing.Stop()
				delete(a.checkHTTPs, cid)
			}
			// Clamp the interval to the agent-wide minimum.
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)

			http := &checks.CheckHTTP{
				CheckID:          cid,
				ServiceID:        sid,
				HTTP:             chkType.HTTP,
				Header:           chkType.Header,
				Method:           chkType.Method,
				Body:             chkType.Body,
				DisableRedirects: chkType.DisableRedirects,
				Interval:         chkType.Interval,
				Timeout:          chkType.Timeout,
				Logger:           a.logger,
				OutputMaxSize:    maxOutputSize,
				TLSClientConfig:  tlsClientConfig,
				StatusHandler:    statusHandler,
			}

			if proxy != nil && proxy.Proxy.Expose.Checks {
				port, err := a.listenerPortLocked(sid, cid)
				if err != nil {
					a.logger.Error("error exposing check",
						"check", cid.String(),
						"error", err,
					)
					return err
				}
				// Rewrite the check target to the proxy's own address so an
				// agent on a different IP can reach it through the exposed
				// listener.
				http.ProxyHTTP = httpInjectAddr(http.HTTP, proxy.Address, port)
				check.ExposedPort = port
			}

			http.Start()
			a.checkHTTPs[cid] = http

		case chkType.IsTCP():
			if existing, ok := a.checkTCPs[cid]; ok {
				existing.Stop()
				delete(a.checkTCPs, cid)
			}
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			tcp := &checks.CheckTCP{
				CheckID:       cid,
				ServiceID:     sid,
				TCP:           chkType.TCP,
				Interval:      chkType.Interval,
				Timeout:       chkType.Timeout,
				Logger:        a.logger,
				StatusHandler: statusHandler,
			}
			tcp.Start()
			a.checkTCPs[cid] = tcp

		case chkType.IsUDP():
			if existing, ok := a.checkUDPs[cid]; ok {
				existing.Stop()
				delete(a.checkUDPs, cid)
			}
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			udp := &checks.CheckUDP{
				CheckID:       cid,
				ServiceID:     sid,
				UDP:           chkType.UDP,
				Interval:      chkType.Interval,
				Timeout:       chkType.Timeout,
				Logger:        a.logger,
				StatusHandler: statusHandler,
			}
			udp.Start()
			a.checkUDPs[cid] = udp

		case chkType.IsGRPC():
			if existing, ok := a.checkGRPCs[cid]; ok {
				existing.Stop()
				delete(a.checkGRPCs, cid)
			}
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			// TLS is optional for gRPC checks; a nil config means plaintext.
			var tlsClientConfig *tls.Config
			if chkType.GRPCUseTLS {
				tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
			}

			grpc := &checks.CheckGRPC{
				CheckID:         cid,
				ServiceID:       sid,
				GRPC:            chkType.GRPC,
				Interval:        chkType.Interval,
				Timeout:         chkType.Timeout,
				Logger:          a.logger,
				TLSClientConfig: tlsClientConfig,
				StatusHandler:   statusHandler,
			}

			if proxy != nil && proxy.Proxy.Expose.Checks {
				port, err := a.listenerPortLocked(sid, cid)
				if err != nil {
					a.logger.Error("error exposing check",
						"check", cid.String(),
						"error", err,
					)
					return err
				}
				// Same proxy-address rewrite as the HTTP case above.
				grpc.ProxyGRPC = grpcInjectAddr(grpc.GRPC, proxy.Address, port)
				check.ExposedPort = port
			}

			grpc.Start()
			a.checkGRPCs[cid] = grpc

		case chkType.IsDocker():
			if existing, ok := a.checkDockers[cid]; ok {
				existing.Stop()
				delete(a.checkDockers, cid)
			}
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			// Lazily create the shared docker client on the first docker
			// check registration.
			if a.dockerClient == nil {
				dc, err := checks.NewDockerClient(os.Getenv("DOCKER_HOST"), int64(maxOutputSize))
				if err != nil {
					a.logger.Error("error creating docker client", "error", err)
					return err
				}
				a.logger.Debug("created docker client", "host", dc.Host())
				a.dockerClient = dc
			}

			dockerCheck := &checks.CheckDocker{
				CheckID:           cid,
				ServiceID:         sid,
				DockerContainerID: chkType.DockerContainerID,
				Shell:             chkType.Shell,
				ScriptArgs:        chkType.ScriptArgs,
				Interval:          chkType.Interval,
				Logger:            a.logger,
				Client:            a.dockerClient,
				StatusHandler:     statusHandler,
			}
			// NOTE(review): any existing runner for cid was already stopped
			// and deleted at the top of this case, so this lookup appears
			// redundant — confirm before removing.
			if prev := a.checkDockers[cid]; prev != nil {
				prev.Stop()
			}
			dockerCheck.Start()
			a.checkDockers[cid] = dockerCheck

		case chkType.IsMonitor():
			if existing, ok := a.checkMonitors[cid]; ok {
				existing.Stop()
				delete(a.checkMonitors, cid)
			}
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			monitor := &checks.CheckMonitor{
				Notify:        a.State,
				CheckID:       cid,
				ServiceID:     sid,
				ScriptArgs:    chkType.ScriptArgs,
				Interval:      chkType.Interval,
				Timeout:       chkType.Timeout,
				Logger:        a.logger,
				OutputMaxSize: maxOutputSize,
				StatusHandler: statusHandler,
			}
			monitor.Start()
			a.checkMonitors[cid] = monitor

		case chkType.IsH2PING():
			if existing, ok := a.checkH2PINGs[cid]; ok {
				existing.Stop()
				delete(a.checkH2PINGs, cid)
			}
			if chkType.Interval < checks.MinInterval {
				a.logger.Warn("check has interval below minimum",
					"check", cid.String(),
					"minimum_interval", checks.MinInterval,
				)
				chkType.Interval = checks.MinInterval
			}

			var tlsClientConfig *tls.Config
			if chkType.H2PingUseTLS {
				tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
				// HTTP/2 over TLS requires ALPN to advertise the h2 protocol.
				tlsClientConfig.NextProtos = []string{http2.NextProtoTLS}
			}

			h2ping := &checks.CheckH2PING{
				CheckID:         cid,
				ServiceID:       sid,
				H2PING:          chkType.H2PING,
				Interval:        chkType.Interval,
				Timeout:         chkType.Timeout,
				Logger:          a.logger,
				TLSClientConfig: tlsClientConfig,
				StatusHandler:   statusHandler,
			}
			h2ping.Start()
			a.checkH2PINGs[cid] = h2ping

		case chkType.IsAlias():
			if existing, ok := a.checkAliases[cid]; ok {
				existing.Stop()
				delete(a.checkAliases, cid)
			}

			var rpcReq structs.NodeSpecificRequest
			rpcReq.Datacenter = a.config.Datacenter
			rpcReq.EnterpriseMeta = *a.AgentEnterpriseMeta()

			// The token to set is really important. The behavior below follows
			// the same behavior as anti-entropy: we use the user-specified token
			// if set (either on the service or check definition), otherwise
			// we use the "UserToken" on the agent. This is tested.
			rpcReq.Token = a.tokens.UserToken()
			if token != "" {
				rpcReq.Token = token
			}

			aliasServiceID := structs.NewServiceID(chkType.AliasService, &check.EnterpriseMeta)
			chkImpl := &checks.CheckAlias{
				Notify:         a.State,
				RPC:            a.delegate,
				RPCReq:         rpcReq,
				CheckID:        cid,
				Node:           chkType.AliasNode,
				ServiceID:      aliasServiceID,
				EnterpriseMeta: check.EnterpriseMeta,
			}
			chkImpl.Start()
			a.checkAliases[cid] = chkImpl

		default:
			return fmt.Errorf("Check type is not valid")
		}

		// Notify channel that watches for service state changes
		// This is a non-blocking send to avoid synchronizing on a large number of check updates
		s := a.State.ServiceState(sid)
		if s != nil && !s.Deleted {
			select {
			case s.WatchCh <- struct{}{}:
			default:
			}
		}

		// Track (or clear) the deregister-critical deadline for the reaper,
		// enforcing the configured minimum interval.
		if chkType.DeregisterCriticalServiceAfter > 0 {
			timeout := chkType.DeregisterCriticalServiceAfter
			if timeout < a.config.CheckDeregisterIntervalMin {
				timeout = a.config.CheckDeregisterIntervalMin
				a.logger.Warn("check has deregister interval below minimum",
					"check", cid.String(),
					"minimum_interval", a.config.CheckDeregisterIntervalMin,
				)
			}
			a.checkReapAfter[cid] = timeout
		} else {
			delete(a.checkReapAfter, cid)
		}
	}

	return nil
}
// RemoveCheck is used to remove a health check.
// The agent will make a best effort to ensure it is deregistered.
//
// RemoveCheck acquires the agent state lock and delegates to
// removeCheckLocked.
func (a *Agent) RemoveCheck(checkID structs.CheckID, persist bool) error {
	a.stateLock.Lock()
	defer a.stateLock.Unlock()
	return a.removeCheckLocked(checkID, persist)
}
// removeCheckLocked is used to remove a health check.
// The agent will make a best effort to ensure it is deregistered.
// The caller must hold a.stateLock.
func (a *Agent) removeCheckLocked(checkID structs.CheckID, persist bool) error {
	// Validate CheckID
	if checkID.ID == "" {
		return fmt.Errorf("CheckID missing")
	}

	// Notify channel that watches for service state changes
	// This is a non-blocking send to avoid synchronizing on a large number of check updates
	var svcID structs.ServiceID
	if c := a.State.Check(checkID); c != nil {
		svcID = c.CompoundServiceID()
	}

	s := a.State.ServiceState(svcID)
	if s != nil && !s.Deleted {
		select {
		case s.WatchCh <- struct{}{}:
		default:
		}
	}

	// Delete port from allocated port set
	// If checks weren't being exposed then this is a no-op
	portKey := listenerPortKey(svcID, checkID)
	delete(a.exposedPorts, portKey)

	// Stop the runner(s) and drop the check from local state.
	a.cancelCheckMonitors(checkID)
	a.State.RemoveCheck(checkID)

	// Optionally purge the persisted definition and any TTL state.
	if persist {
		if err := a.purgeCheck(checkID); err != nil {
			return err
		}
		if err := a.purgeCheckState(checkID); err != nil {
			return err
		}
	}

	a.logger.Debug("removed check", "check", checkID.String())
	return nil
}
2020-04-01 20:52:23 +00:00
// ServiceHTTPBasedChecks returns HTTP and GRPC based Checks
// for the given serviceID
2019-12-10 02:26:41 +00:00
func ( a * Agent ) ServiceHTTPBasedChecks ( serviceID structs . ServiceID ) [ ] structs . CheckType {
2019-09-26 02:55:52 +00:00
a . stateLock . Lock ( )
defer a . stateLock . Unlock ( )
var chkTypes = make ( [ ] structs . CheckType , 0 )
for _ , c := range a . checkHTTPs {
if c . ServiceID == serviceID {
chkTypes = append ( chkTypes , c . CheckType ( ) )
}
}
for _ , c := range a . checkGRPCs {
if c . ServiceID == serviceID {
chkTypes = append ( chkTypes , c . CheckType ( ) )
}
}
return chkTypes
}
2020-04-01 20:52:23 +00:00
// AdvertiseAddrLAN returns the AdvertiseAddrLAN config value as a string.
func (a *Agent) AdvertiseAddrLAN() string {
	return a.config.AdvertiseAddrLAN.String()
}
2019-12-10 02:26:41 +00:00
func ( a * Agent ) cancelCheckMonitors ( checkID structs . CheckID ) {
2014-01-30 21:39:02 +00:00
// Stop any monitors
2016-08-16 07:05:55 +00:00
delete ( a . checkReapAfter , checkID )
2014-01-30 21:39:02 +00:00
if check , ok := a . checkMonitors [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkMonitors , checkID )
}
2015-01-12 22:34:39 +00:00
if check , ok := a . checkHTTPs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkHTTPs , checkID )
}
2015-07-23 11:45:08 +00:00
if check , ok := a . checkTCPs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkTCPs , checkID )
}
2022-06-06 19:13:19 +00:00
if check , ok := a . checkUDPs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkUDPs , checkID )
}
2017-12-27 04:35:22 +00:00
if check , ok := a . checkGRPCs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkGRPCs , checkID )
}
2014-01-30 21:39:02 +00:00
if check , ok := a . checkTTLs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkTTLs , checkID )
}
2017-07-18 18:50:37 +00:00
if check , ok := a . checkDockers [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkDockers , checkID )
}
2021-04-09 19:12:10 +00:00
if check , ok := a . checkH2PINGs [ checkID ] ; ok {
check . Stop ( )
delete ( a . checkH2PINGs , checkID )
}
2014-01-30 21:39:02 +00:00
}
2016-08-16 07:05:55 +00:00
// updateTTLCheck is used to update the status of a TTL check via the Agent API.
// It resets the check's TTL timer and, when a data dir is configured,
// persists the new state so it survives an agent restart.
func (a *Agent) updateTTLCheck(checkID structs.CheckID, status, output string) error {
	a.stateLock.Lock()
	defer a.stateLock.Unlock()

	// Grab the TTL check.
	check, ok := a.checkTTLs[checkID]
	if !ok {
		return fmt.Errorf("CheckID %q does not have associated TTL", checkID.String())
	}

	// Set the status through CheckTTL to reset the TTL.
	outputTruncated := check.SetStatus(status, output)

	// We don't write any files in dev mode so bail here.
	if a.config.DataDir == "" {
		return nil
	}

	// Persist the state so the TTL check can come up in a good state after
	// an agent restart, especially with long TTL values.
	if err := a.persistCheckState(check, status, outputTruncated); err != nil {
		return fmt.Errorf("failed persisting state for check %q: %s", checkID.String(), err)
	}
	return nil
}
// persistCheckState is used to record the check status into the data dir.
// This allows the state to be restored on a later agent start. Currently
// only useful for TTL based checks.
2017-10-25 09:18:07 +00:00
func ( a * Agent ) persistCheckState ( check * checks . CheckTTL , status , output string ) error {
2015-06-05 23:17:07 +00:00
// Create the persisted state
state := persistedCheckState {
2019-12-10 02:26:41 +00:00
CheckID : check . CheckID . ID ,
Status : status ,
Output : output ,
Expires : time . Now ( ) . Add ( check . TTL ) . Unix ( ) ,
EnterpriseMeta : check . CheckID . EnterpriseMeta ,
2015-06-05 23:17:07 +00:00
}
// Encode the state
buf , err := json . Marshal ( state )
if err != nil {
return err
}
// Create the state dir if it doesn't exist
dir := filepath . Join ( a . config . DataDir , checkStateDir )
if err := os . MkdirAll ( dir , 0700 ) ; err != nil {
return fmt . Errorf ( "failed creating check state dir %q: %s" , dir , err )
}
// Write the state to the file
2021-11-04 20:07:54 +00:00
file := filepath . Join ( dir , check . CheckID . StringHashSHA256 ( ) )
2016-11-07 18:51:03 +00:00
// Create temp file in same dir, to make more likely atomic
2016-08-03 15:32:21 +00:00
tempFile := file + ".tmp"
2016-11-07 20:24:31 +00:00
// persistCheckState is called frequently, so don't use writeFileAtomic to avoid calling fsync here
2016-08-03 15:32:21 +00:00
if err := ioutil . WriteFile ( tempFile , buf , 0600 ) ; err != nil {
return fmt . Errorf ( "failed writing temp file %q: %s" , tempFile , err )
}
if err := os . Rename ( tempFile , file ) ; err != nil {
return fmt . Errorf ( "failed to rename temp file from %q to %q: %s" , tempFile , file , err )
2015-06-05 23:17:07 +00:00
}
return nil
}
2015-06-08 16:35:10 +00:00
// loadCheckState is used to restore the persisted state of a check.
// It reads the SHA256-named state file, falling back to (and migrating
// from) the legacy MD5-named file, and copies the stored status/output
// onto the check unless the state has expired.
func (a *Agent) loadCheckState(check *structs.HealthCheck) error {
	cid := check.CompoundCheckID()
	// Try to read the persisted state for this check
	file := filepath.Join(a.config.DataDir, checkStateDir, cid.StringHashSHA256())
	buf, err := ioutil.ReadFile(file)
	if err != nil {
		if os.IsNotExist(err) {
			// try the md5 based name. This can be removed once we no longer support upgrades from versions that use MD5 hashing
			oldFile := filepath.Join(a.config.DataDir, checkStateDir, cid.StringHashMD5())
			buf, err = ioutil.ReadFile(oldFile)
			if err != nil {
				if os.IsNotExist(err) {
					// No state under either name: nothing to restore.
					return nil
				} else {
					return fmt.Errorf("failed reading check state %q: %w", file, err)
				}
			}
			// Migrate the legacy file to the SHA256 name. A failure here is
			// logged but not fatal since the state was already read.
			if err := os.Rename(oldFile, file); err != nil {
				a.logger.Error("Failed renaming check state",
					"file", oldFile,
					"targetFile", file,
					"error", err,
				)
			}
		} else {
			return fmt.Errorf("failed reading file %q: %w", file, err)
		}
	}

	// Decode the state data
	var p persistedCheckState
	if err := json.Unmarshal(buf, &p); err != nil {
		// Corrupt state is discarded rather than treated as fatal.
		a.logger.Error("failed decoding check state", "error", err)
		return a.purgeCheckState(cid)
	}

	// Check if the state has expired
	if time.Now().Unix() >= p.Expires {
		a.logger.Debug("check state expired, not restoring", "check", cid.String())
		return a.purgeCheckState(cid)
	}

	// Restore the fields from the state
	check.Output = p.Output
	check.Status = p.Status
	return nil
}
2014-02-24 00:42:39 +00:00
2015-06-05 23:57:14 +00:00
// purgeCheckState is used to purge the state of a check from the data dir
2019-12-10 02:26:41 +00:00
func ( a * Agent ) purgeCheckState ( checkID structs . CheckID ) error {
2021-11-04 20:07:54 +00:00
file := filepath . Join ( a . config . DataDir , checkStateDir , checkID . StringHashSHA256 ( ) )
2015-06-05 23:57:14 +00:00
err := os . Remove ( file )
if os . IsNotExist ( err ) {
return nil
}
return err
}
2014-02-24 00:42:39 +00:00
// Stats is used to get various debugging state from the sub-systems
func ( a * Agent ) Stats ( ) map [ string ] map [ string ] string {
2017-05-15 14:05:17 +00:00
stats := a . delegate . Stats ( )
2014-02-24 00:42:39 +00:00
stats [ "agent" ] = map [ string ] string {
2017-08-28 12:17:12 +00:00
"check_monitors" : strconv . Itoa ( len ( a . checkMonitors ) ) ,
"check_ttls" : strconv . Itoa ( len ( a . checkTTLs ) ) ,
}
2017-08-28 12:17:13 +00:00
for k , v := range a . State . Stats ( ) {
2017-08-28 12:17:12 +00:00
stats [ "agent" ] [ k ] = v
2014-02-24 00:42:39 +00:00
}
2014-06-06 21:40:22 +00:00
revision := a . config . Revision
if len ( revision ) > 8 {
revision = revision [ : 8 ]
}
stats [ "build" ] = map [ string ] string {
2022-05-05 02:16:18 +00:00
"revision" : revision ,
"version" : a . config . Version ,
"version_metadata" : a . config . VersionMetadata ,
"prerelease" : a . config . VersionPrerelease ,
2014-06-06 21:40:22 +00:00
}
2021-05-11 14:50:03 +00:00
for outerKey , outerValue := range a . enterpriseStats ( ) {
if _ , ok := stats [ outerKey ] ; ok {
for innerKey , innerValue := range outerValue {
stats [ outerKey ] [ innerKey ] = innerValue
}
} else {
stats [ outerKey ] = outerValue
}
}
2014-02-24 00:42:39 +00:00
return stats
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
// storePid is used to write out our PID to a file if necessary
2014-05-06 16:57:53 +00:00
func ( a * Agent ) storePid ( ) error {
2014-05-06 19:43:33 +00:00
// Quit fast if no pidfile
2014-05-06 03:29:50 +00:00
pidPath := a . config . PidFile
2014-05-06 19:43:33 +00:00
if pidPath == "" {
return nil
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
// Open the PID file
pidFile , err := os . OpenFile ( pidPath , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , 0666 )
if err != nil {
return fmt . Errorf ( "Could not open pid file: %v" , err )
2014-05-06 03:29:50 +00:00
}
2014-05-06 19:43:33 +00:00
defer pidFile . Close ( )
2014-05-06 16:57:53 +00:00
2014-05-06 19:43:33 +00:00
// Write out the PID
pid := os . Getpid ( )
_ , err = pidFile . WriteString ( fmt . Sprintf ( "%d" , pid ) )
if err != nil {
return fmt . Errorf ( "Could not write to pid file: %s" , err )
}
2014-05-06 16:57:53 +00:00
return nil
2014-05-06 03:29:50 +00:00
}
2014-05-06 19:43:33 +00:00
// deletePid is used to delete our PID on exit
2014-05-06 16:57:53 +00:00
func ( a * Agent ) deletePid ( ) error {
2014-05-06 19:43:33 +00:00
// Quit fast if no pidfile
2014-05-06 03:29:50 +00:00
pidPath := a . config . PidFile
2014-05-06 19:43:33 +00:00
if pidPath == "" {
return nil
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
stat , err := os . Stat ( pidPath )
if err != nil {
return fmt . Errorf ( "Could not remove pid file: %s" , err )
}
2014-05-06 03:29:50 +00:00
2014-05-06 19:43:33 +00:00
if stat . IsDir ( ) {
return fmt . Errorf ( "Specified pid file path is directory" )
2014-05-06 03:29:50 +00:00
}
2014-05-06 16:57:53 +00:00
2014-05-06 19:43:33 +00:00
err = os . Remove ( pidPath )
if err != nil {
return fmt . Errorf ( "Could not remove pid file: %s" , err )
}
2014-05-06 16:57:53 +00:00
return nil
2014-05-06 03:29:50 +00:00
}
2014-11-26 07:58:02 +00:00
2015-01-08 02:05:46 +00:00
// loadServices will load service definitions from configuration and persisted
// definitions on disk, and load them into the local agent.
//
// Config-sourced services are registered first and take precedence: any
// persisted file whose service ID is already in local state is purged.
// The `snap` map carries check output/status across a reload. This calls
// addServiceLocked, so the caller is presumably expected to hold the agent
// state lock (per the *Locked naming) — confirm at call sites.
func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckID]*structs.HealthCheck) error {
	// Load any persisted service configs so we can feed those into the initial
	// registrations below.
	persistedServiceConfigs, err := a.readPersistedServiceConfigs()
	if err != nil {
		return err
	}

	// Register the services from config
	for _, service := range conf.Services {
		ns := service.NodeService()
		chkTypes, err := service.CheckTypes()
		if err != nil {
			return fmt.Errorf("Failed to validate checks for service %q: %v", service.Name, err)
		}

		// Grab and validate sidecar if there is one too
		sidecar, sidecarChecks, sidecarToken, err := a.sidecarServiceFromNodeService(ns, service.Token)
		if err != nil {
			return fmt.Errorf("Failed to validate sidecar for service %q: %v", service.Name, err)
		}
		// Remove sidecar from NodeService now it's done it's job it's just a config
		// syntax sugar and shouldn't be persisted in local or server state.
		ns.Connect.SidecarService = nil

		sid := ns.CompoundServiceID()
		err = a.addServiceLocked(addServiceLockedRequest{
			AddServiceRequest: AddServiceRequest{
				Service:               ns,
				chkTypes:              chkTypes,
				persist:               false, // don't rewrite the file with the same data we just read
				token:                 service.Token,
				replaceExistingChecks: false, // do default behavior
				Source:                ConfigSourceLocal,
			},
			serviceDefaults:      serviceDefaultsFromStruct(persistedServiceConfigs[sid]),
			persistServiceConfig: false, // don't rewrite the file with the same data we just read
			checkStateSnapshot:   snap,
		})
		if err != nil {
			return fmt.Errorf("Failed to register service %q: %v", service.Name, err)
		}

		// If there is a sidecar service, register that too.
		if sidecar != nil {
			sidecarServiceID := sidecar.CompoundServiceID()
			err = a.addServiceLocked(addServiceLockedRequest{
				AddServiceRequest: AddServiceRequest{
					Service:               sidecar,
					chkTypes:              sidecarChecks,
					persist:               false, // don't rewrite the file with the same data we just read
					token:                 sidecarToken,
					replaceExistingChecks: false, // do default behavior
					Source:                ConfigSourceLocal,
				},
				serviceDefaults:      serviceDefaultsFromStruct(persistedServiceConfigs[sidecarServiceID]),
				persistServiceConfig: false, // don't rewrite the file with the same data we just read
				checkStateSnapshot:   snap,
			})
			if err != nil {
				return fmt.Errorf("Failed to register sidecar for service %q: %v", service.Name, err)
			}
		}
	}

	// Load any persisted services
	svcDir := filepath.Join(a.config.DataDir, servicesDir)
	files, err := ioutil.ReadDir(svcDir)
	if err != nil {
		if os.IsNotExist(err) {
			// No persistence directory yet — nothing to restore.
			return nil
		}
		return fmt.Errorf("Failed reading services dir %q: %w", svcDir, err)
	}
	for _, fi := range files {
		// Skip all dirs
		if fi.IsDir() {
			continue
		}

		// Skip all partially written temporary files
		if strings.HasSuffix(fi.Name(), "tmp") {
			a.logger.Warn("Ignoring temporary service file", "file", fi.Name())
			continue
		}

		// Read the contents into a buffer
		file := filepath.Join(svcDir, fi.Name())
		buf, err := ioutil.ReadFile(file)
		if err != nil {
			return fmt.Errorf("failed reading service file %q: %w", file, err)
		}

		// Try decoding the service definition
		var p persistedService
		if err := json.Unmarshal(buf, &p); err != nil {
			// Backwards-compatibility for pre-0.5.1 persisted services
			if err := json.Unmarshal(buf, &p.Service); err != nil {
				// Undecodable file: log and skip rather than aborting the
				// whole load.
				a.logger.Error("Failed decoding service file",
					"file", file,
					"error", err,
				)
				continue
			}
		}

		// Rename files that used the old md5 hash to the new sha256 name; only needed when upgrading from 1.10 and before.
		newPath := a.makeServiceFilePath(p.Service.CompoundServiceID())
		if file != newPath {
			if err := os.Rename(file, newPath); err != nil {
				a.logger.Error("Failed renaming service file",
					"file", file,
					"targetFile", newPath,
					"error", err,
				)
			}
		}

		// NOTE(review): after a successful rename above, `file` still refers to
		// the old path, so the os.Remove and log lines below operate on a path
		// that no longer exists — confirm whether `file` should be updated to
		// newPath after the rename.
		if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Service.PartitionOrDefault()) {
			a.logger.Info("Purging service file in wrong partition",
				"file", file,
				"partition", p.Service.EnterpriseMeta.PartitionOrDefault(),
			)
			if err := os.Remove(file); err != nil {
				a.logger.Error("Failed purging service file",
					"file", file,
					"error", err,
				)
			}
			continue
		}

		// Restore LocallyRegisteredAsSidecar, see persistedService.LocallyRegisteredAsSidecar
		p.Service.LocallyRegisteredAsSidecar = p.LocallyRegisteredAsSidecar

		serviceID := p.Service.CompoundServiceID()

		source, ok := ConfigSourceFromName(p.Source)
		if !ok {
			a.logger.Warn("service exists with invalid source, purging",
				"service", serviceID.String(),
				"source", p.Source,
			)
			if err := a.purgeService(serviceID); err != nil {
				return fmt.Errorf("failed purging service %q: %w", serviceID, err)
			}
			if err := a.purgeServiceConfig(serviceID); err != nil {
				return fmt.Errorf("failed purging service config %q: %w", serviceID, err)
			}
			continue
		}

		if a.State.Service(serviceID) != nil {
			// Purge previously persisted service. This allows config to be
			// preferred over services persisted from the API.
			a.logger.Debug("service exists, not restoring from file",
				"service", serviceID.String(),
				"file", file,
			)
			if err := a.purgeService(serviceID); err != nil {
				return fmt.Errorf("failed purging service %q: %w", serviceID.String(), err)
			}
			if err := a.purgeServiceConfig(serviceID); err != nil {
				return fmt.Errorf("failed purging service config %q: %w", serviceID.String(), err)
			}
		} else {
			a.logger.Debug("restored service definition from file",
				"service", serviceID.String(),
				"file", file,
			)
			err = a.addServiceLocked(addServiceLockedRequest{
				AddServiceRequest: AddServiceRequest{
					Service:               p.Service,
					chkTypes:              nil,
					persist:               false, // don't rewrite the file with the same data we just read
					token:                 p.Token,
					replaceExistingChecks: false, // do default behavior
					Source:                source,
				},
				serviceDefaults:      serviceDefaultsFromStruct(persistedServiceConfigs[serviceID]),
				persistServiceConfig: false, // don't rewrite the file with the same data we just read
				checkStateSnapshot:   snap,
			})
			if err != nil {
				return fmt.Errorf("failed adding service %q: %w", serviceID, err)
			}
		}
	}

	// Remove persisted service-config entries whose service is no longer
	// registered after the passes above.
	for serviceID := range persistedServiceConfigs {
		if a.State.Service(serviceID) == nil {
			// This can be cleaned up now.
			if err := a.purgeServiceConfig(serviceID); err != nil {
				return fmt.Errorf("failed purging service config %q: %w", serviceID, err)
			}
		}
	}

	return nil
}
2017-08-30 10:25:49 +00:00
// unloadServices will deregister all services.
2015-01-08 02:05:46 +00:00
func ( a * Agent ) unloadServices ( ) error {
2021-08-19 20:09:42 +00:00
for id := range a . State . AllServices ( ) {
2019-03-04 14:34:05 +00:00
if err := a . removeServiceLocked ( id , false ) ; err != nil {
2017-08-28 12:17:11 +00:00
return fmt . Errorf ( "Failed deregistering service '%s': %v" , id , err )
2014-11-26 07:58:02 +00:00
}
}
2015-01-08 02:05:46 +00:00
return nil
}
// loadChecks loads check definitions and/or persisted check definitions from
// disk and re-registers them with the local agent.
//
// Config-sourced checks win over persisted ones; the `snap` map carries check
// output/status across a reload. This calls addCheckLocked, so the caller is
// presumably expected to hold the agent state lock (per the *Locked naming) —
// confirm at call sites.
func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]*structs.HealthCheck) error {
	// Register the checks from config
	for _, check := range conf.Checks {
		health := check.HealthCheck(conf.NodeName)
		// Restore the fields from the snapshot.
		if prev, ok := snap[health.CompoundCheckID()]; ok {
			health.Output = prev.Output
			health.Status = prev.Status
		}
		chkType := check.CheckType()
		if err := a.addCheckLocked(health, chkType, false, check.Token, ConfigSourceLocal); err != nil {
			return fmt.Errorf("Failed to register check '%s': %v %v", check.Name, err, check)
		}
	}

	// Load any persisted checks
	checkDir := filepath.Join(a.config.DataDir, checksDir)
	files, err := ioutil.ReadDir(checkDir)
	if err != nil {
		if os.IsNotExist(err) {
			// No persistence directory yet — nothing to restore.
			return nil
		}
		return fmt.Errorf("Failed reading checks dir %q: %w", checkDir, err)
	}
	for _, fi := range files {
		// Ignore dirs - we only care about the check definition files
		if fi.IsDir() {
			continue
		}

		// Read the contents into a buffer
		file := filepath.Join(checkDir, fi.Name())
		buf, err := ioutil.ReadFile(file)
		if err != nil {
			return fmt.Errorf("failed reading check file %q: %w", file, err)
		}

		// Decode the check
		var p persistedCheck
		if err := json.Unmarshal(buf, &p); err != nil {
			// Undecodable file: log and skip rather than aborting the load.
			a.logger.Error("Failed decoding check file",
				"file", file,
				"error", err,
			)
			continue
		}
		checkID := p.Check.CompoundCheckID()

		// Rename files that used the old md5 hash to the new sha256 name; only needed when upgrading from 1.10 and before.
		newPath := filepath.Join(a.config.DataDir, checksDir, checkID.StringHashSHA256())
		if file != newPath {
			if err := os.Rename(file, newPath); err != nil {
				a.logger.Error("Failed renaming check file",
					"file", file,
					"targetFile", newPath,
					"error", err,
				)
			}
		}

		// NOTE(review): after a successful rename above, `file` still points at
		// the old (now nonexistent) path, so the os.Remove below would fail —
		// confirm whether `file` should be updated to newPath first.
		if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Check.PartitionOrDefault()) {
			a.logger.Info("Purging check file in wrong partition",
				"file", file,
				"partition", p.Check.PartitionOrDefault(),
			)
			if err := os.Remove(file); err != nil {
				return fmt.Errorf("failed purging check %q: %w", checkID, err)
			}
			continue
		}

		source, ok := ConfigSourceFromName(p.Source)
		if !ok {
			a.logger.Warn("check exists with invalid source, purging",
				"check", checkID.String(),
				"source", p.Source,
			)
			if err := a.purgeCheck(checkID); err != nil {
				return fmt.Errorf("failed purging check %q: %w", checkID, err)
			}
			continue
		}

		if a.State.Check(checkID) != nil {
			// Purge previously persisted check. This allows config to be
			// preferred over persisted checks from the API.
			a.logger.Debug("check exists, not restoring from file",
				"check", checkID.String(),
				"file", file,
			)
			if err := a.purgeCheck(checkID); err != nil {
				return fmt.Errorf("Failed purging check %q: %w", checkID, err)
			}
		} else {
			// Default check to critical to avoid placing potentially unhealthy
			// services into the active pool
			p.Check.Status = api.HealthCritical

			// Restore the fields from the snapshot.
			if prev, ok := snap[p.Check.CompoundCheckID()]; ok {
				p.Check.Output = prev.Output
				p.Check.Status = prev.Status
			}

			if err := a.addCheckLocked(p.Check, p.ChkType, false, p.Token, source); err != nil {
				// Purge the check if it is unable to be restored.
				a.logger.Warn("Failed to restore check",
					"check", checkID.String(),
					"error", err,
				)
				if err := a.purgeCheck(checkID); err != nil {
					return fmt.Errorf("Failed purging check %q: %w", checkID, err)
				}
				// NOTE(review): execution falls through to the "restored" debug
				// log below even though the restore failed and the check was
				// purged — confirm whether a `continue` is intended here.
			}
			a.logger.Debug("restored health check from file",
				"check", p.Check.CheckID,
				"file", file,
			)
		}
	}

	return nil
}
2015-01-08 02:05:46 +00:00
// unloadChecks will deregister all checks known to the local agent.
func ( a * Agent ) unloadChecks ( ) error {
2021-08-19 20:09:42 +00:00
for id := range a . State . AllChecks ( ) {
2019-03-04 14:34:05 +00:00
if err := a . removeCheckLocked ( id , false ) ; err != nil {
2017-08-28 12:17:11 +00:00
return fmt . Errorf ( "Failed deregistering check '%s': %s" , id , err )
2015-01-08 02:05:46 +00:00
}
}
return nil
}
2015-01-15 08:16:34 +00:00
2015-02-17 20:00:04 +00:00
// snapshotCheckState is used to snapshot the current state of the health
// checks. This is done before we reload our checks, so that we can properly
// restore into the same state.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) snapshotCheckState ( ) map [ structs . CheckID ] * structs . HealthCheck {
2021-08-19 20:09:42 +00:00
return a . State . AllChecks ( )
2015-02-17 20:00:04 +00:00
}
2017-01-11 19:41:12 +00:00
// loadMetadata loads node metadata fields from the agent config and
2017-01-05 22:10:26 +00:00
// updates them on the local agent.
2017-09-25 18:40:42 +00:00
func ( a * Agent ) loadMetadata ( conf * config . RuntimeConfig ) error {
2017-08-28 12:17:12 +00:00
meta := map [ string ] string { }
for k , v := range conf . NodeMeta {
meta [ k ] = v
2017-01-11 19:41:12 +00:00
}
2017-08-28 12:17:12 +00:00
meta [ structs . MetaSegmentKey ] = conf . SegmentName
2017-08-28 12:17:13 +00:00
return a . State . LoadMetadata ( meta )
2017-01-11 19:41:12 +00:00
}
2017-01-05 22:10:26 +00:00
// unloadMetadata resets the local metadata state so a subsequent load can
// repopulate it from fresh configuration.
func (a *Agent) unloadMetadata() {
	a.State.UnloadMetadata()
}
2015-01-15 20:20:57 +00:00
// serviceMaintCheckID returns the ID of a given service's maintenance check
2019-12-10 02:26:41 +00:00
func serviceMaintCheckID ( serviceID structs . ServiceID ) structs . CheckID {
2020-04-15 16:03:29 +00:00
cid := types . CheckID ( structs . ServiceMaintPrefix + serviceID . ID )
return structs . NewCheckID ( cid , & serviceID . EnterpriseMeta )
2015-01-15 20:20:57 +00:00
}
2015-01-15 08:25:36 +00:00
// EnableServiceMaintenance will register a false health check against the given
// service ID with critical status. This will exclude the service from queries.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) EnableServiceMaintenance ( serviceID structs . ServiceID , reason , token string ) error {
service := a . State . Service ( serviceID )
if service == nil {
return fmt . Errorf ( "No service registered with ID %q" , serviceID . String ( ) )
2015-01-15 08:16:34 +00:00
}
2015-01-15 20:20:57 +00:00
// Check if maintenance mode is not already enabled
checkID := serviceMaintCheckID ( serviceID )
2019-12-10 02:26:41 +00:00
if a . State . Check ( checkID ) != nil {
2015-01-15 18:51:00 +00:00
return nil
2015-01-15 08:16:34 +00:00
}
2015-01-21 20:21:57 +00:00
// Use default notes if no reason provided
if reason == "" {
2015-01-21 22:45:09 +00:00
reason = defaultServiceMaintReason
2015-01-21 20:21:57 +00:00
}
2015-01-15 08:16:34 +00:00
// Create and register the critical health check
check := & structs . HealthCheck {
2019-12-10 02:26:41 +00:00
Node : a . config . NodeName ,
CheckID : checkID . ID ,
Name : "Service Maintenance Mode" ,
Notes : reason ,
ServiceID : service . ID ,
ServiceName : service . Service ,
Status : api . HealthCritical ,
Type : "maintenance" ,
EnterpriseMeta : checkID . EnterpriseMeta ,
2015-01-15 08:16:34 +00:00
}
2018-10-11 12:22:11 +00:00
a . AddCheck ( check , nil , true , token , ConfigSourceLocal )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Service entered maintenance mode" , "service" , serviceID . String ( ) )
2015-01-15 08:16:34 +00:00
return nil
}
2015-01-15 08:25:36 +00:00
// DisableServiceMaintenance will deregister the fake maintenance mode check
// if the service has been marked as in maintenance.
2019-12-10 02:26:41 +00:00
func ( a * Agent ) DisableServiceMaintenance ( serviceID structs . ServiceID ) error {
if a . State . Service ( serviceID ) == nil {
return fmt . Errorf ( "No service registered with ID %q" , serviceID . String ( ) )
2015-01-15 08:16:34 +00:00
}
2015-01-15 20:20:57 +00:00
// Check if maintenance mode is enabled
checkID := serviceMaintCheckID ( serviceID )
2019-12-10 02:26:41 +00:00
if a . State . Check ( checkID ) == nil {
// maintenance mode is not enabled
2015-01-15 20:20:57 +00:00
return nil
}
2015-01-15 08:16:34 +00:00
// Deregister the maintenance check
2015-01-15 20:20:57 +00:00
a . RemoveCheck ( checkID , true )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Service left maintenance mode" , "service" , serviceID . String ( ) )
2015-01-15 20:20:57 +00:00
2015-01-15 08:16:34 +00:00
return nil
}
2015-01-15 19:20:22 +00:00
// EnableNodeMaintenance places a node into maintenance mode.
2015-09-10 18:43:59 +00:00
func ( a * Agent ) EnableNodeMaintenance ( reason , token string ) {
2015-01-15 19:20:22 +00:00
// Ensure node maintenance is not already enabled
2019-12-10 02:26:41 +00:00
if a . State . Check ( structs . NodeMaintCheckID ) != nil {
2015-01-15 19:20:22 +00:00
return
}
2015-01-21 20:21:57 +00:00
// Use a default notes value
if reason == "" {
2015-01-21 22:45:09 +00:00
reason = defaultNodeMaintReason
2015-01-21 20:21:57 +00:00
}
2015-01-15 19:20:22 +00:00
// Create and register the node maintenance check
check := & structs . HealthCheck {
Node : a . config . NodeName ,
2016-11-29 21:15:20 +00:00
CheckID : structs . NodeMaint ,
2015-01-15 19:20:22 +00:00
Name : "Node Maintenance Mode" ,
2015-01-21 20:21:57 +00:00
Notes : reason ,
2017-04-19 23:00:11 +00:00
Status : api . HealthCritical ,
2019-10-17 18:33:11 +00:00
Type : "maintenance" ,
2015-01-15 19:20:22 +00:00
}
2018-10-11 12:22:11 +00:00
a . AddCheck ( check , nil , true , token , ConfigSourceLocal )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Node entered maintenance mode" )
2015-01-15 19:20:22 +00:00
}
// DisableNodeMaintenance removes a node from maintenance mode
func ( a * Agent ) DisableNodeMaintenance ( ) {
2019-12-10 02:26:41 +00:00
if a . State . Check ( structs . NodeMaintCheckID ) == nil {
2015-01-15 20:20:57 +00:00
return
}
2019-12-10 02:26:41 +00:00
a . RemoveCheck ( structs . NodeMaintCheckID , true )
2020-01-28 23:50:41 +00:00
a . logger . Info ( "Node left maintenance mode" )
2015-01-15 19:20:22 +00:00
}
2015-11-12 17:19:33 +00:00
2022-03-31 19:11:49 +00:00
// AutoReloadConfig reloads the agent configuration in auto-reload mode:
// changes to non-reloadable (static) fields are reverted rather than applied.
func (a *Agent) AutoReloadConfig() error {
	return a.reloadConfig(true)
}
// ReloadConfig reloads the agent configuration on explicit request
// (e.g. 'consul reload'), without the auto-reload static-field restrictions.
func (a *Agent) ReloadConfig() error {
	return a.reloadConfig(false)
}
2020-06-10 20:47:35 +00:00
// ReloadConfig will atomically reload all configuration, including
// all services, checks, tokens, metadata, dnsServer configs, etc.
2020-04-01 20:52:23 +00:00
// It will also reload all ongoing watches.
2022-03-31 19:11:49 +00:00
func ( a * Agent ) reloadConfig ( autoReload bool ) error {
2020-09-14 22:31:07 +00:00
newCfg , err := a . baseDeps . AutoConfig . ReadConfig ( )
2020-06-10 20:47:35 +00:00
if err != nil {
return err
}
// copy over the existing node id, this cannot be
// changed while running anyways but this prevents
// breaking some existing behavior.
newCfg . NodeID = a . config . NodeID
2022-06-01 21:53:52 +00:00
// if auto reload is enabled, make sure we have the right certs file watched.
2022-03-31 19:11:49 +00:00
if autoReload {
for _ , f := range [ ] struct {
oldCfg tlsutil . ProtocolConfig
newCfg tlsutil . ProtocolConfig
} {
{ a . config . TLS . InternalRPC , newCfg . TLS . InternalRPC } ,
{ a . config . TLS . GRPC , newCfg . TLS . GRPC } ,
{ a . config . TLS . HTTPS , newCfg . TLS . HTTPS } ,
} {
if f . oldCfg . KeyFile != f . newCfg . KeyFile {
2022-04-04 15:31:39 +00:00
a . configFileWatcher . Replace ( f . oldCfg . KeyFile , f . newCfg . KeyFile )
2022-03-31 19:11:49 +00:00
if err != nil {
return err
}
}
if f . oldCfg . CertFile != f . newCfg . CertFile {
2022-04-04 15:31:39 +00:00
a . configFileWatcher . Replace ( f . oldCfg . CertFile , f . newCfg . CertFile )
2022-03-31 19:11:49 +00:00
if err != nil {
return err
}
}
if revertStaticConfig ( f . oldCfg , f . newCfg ) {
a . logger . Warn ( "Changes to your configuration were detected that for security reasons cannot be automatically applied by 'auto_reload_config'. Manually reload your configuration (e.g. with 'consul reload') to apply these changes." , "StaticRuntimeConfig" , f . oldCfg , "StaticRuntimeConfig From file" , f . newCfg )
}
}
if ! reflect . DeepEqual ( newCfg . StaticRuntimeConfig , a . config . StaticRuntimeConfig ) {
a . logger . Warn ( "Changes to your configuration were detected that for security reasons cannot be automatically applied by 'auto_reload_config'. Manually reload your configuration (e.g. with 'consul reload') to apply these changes." , "StaticRuntimeConfig" , a . config . StaticRuntimeConfig , "StaticRuntimeConfig From file" , newCfg . StaticRuntimeConfig )
// reset not reloadable fields
newCfg . StaticRuntimeConfig = a . config . StaticRuntimeConfig
}
}
2020-06-10 20:47:35 +00:00
return a . reloadConfigInternal ( newCfg )
}
2022-03-31 19:11:49 +00:00
// revertStaticConfig reports whether the new TLS protocol config differs from
// the old one in any field other than CertFile/KeyFile (which are reloadable).
// Such changes cannot be applied by auto-reload and must be reverted.
func revertStaticConfig(oldCfg tlsutil.ProtocolConfig, newCfg tlsutil.ProtocolConfig) bool {
	// Normalize the reloadable cert/key paths to the old values so only
	// non-reloadable fields can differ in the comparison. (The original also
	// built the mirror-image copy but never used it; that dead code and the
	// `if !x { return true }; return false` wrapper are removed.)
	normalized := newCfg
	normalized.CertFile = oldCfg.CertFile
	normalized.KeyFile = oldCfg.KeyFile
	return !reflect.DeepEqual(normalized, oldCfg)
}
2020-06-10 20:47:35 +00:00
// reloadConfigInternal is mainly needed for some unit tests. Instead of parsing
// the configuration using CLI flags and on disk config, this just takes a
// runtime configuration and applies it.
//
// The sequence is order-sensitive: sync is paused and the state lock held for
// the whole reload; checks are snapshotted before unload so their output and
// status survive; tokens are reloaded before services/checks so registrations
// pick up the right tokens.
func (a *Agent) reloadConfigInternal(newCfg *config.RuntimeConfig) error {
	// Change the log level and update it
	if logging.ValidateLogLevel(newCfg.Logging.LogLevel) {
		a.logger.SetLevel(logging.LevelFromString(newCfg.Logging.LogLevel))
	} else {
		// Keep the previous level when the new one is invalid.
		a.logger.Warn("Invalid log level in new configuration", "level", newCfg.Logging.LogLevel)
		newCfg.Logging.LogLevel = a.config.Logging.LogLevel
	}

	// Bulk update the services and checks
	a.PauseSync()
	defer a.ResumeSync()

	a.stateLock.Lock()
	defer a.stateLock.Unlock()

	// Snapshot the current state, and use that to initialize the checks when
	// they are recreated.
	snap := a.snapshotCheckState()

	// First unload all checks, services, and metadata. This lets us begin the reload
	// with a clean slate.
	if err := a.unloadServices(); err != nil {
		return fmt.Errorf("Failed unloading services: %s", err)
	}
	if err := a.unloadChecks(); err != nil {
		return fmt.Errorf("Failed unloading checks: %s", err)
	}
	a.unloadMetadata()

	// Reload tokens - should be done before all the other loading
	// to ensure the correct tokens are available for attaching to
	// the checks and service registrations.
	a.tokens.Load(newCfg.ACLTokens, a.logger)

	if err := a.tlsConfigurator.Update(newCfg.TLS); err != nil {
		return fmt.Errorf("Failed reloading tls configuration: %s", err)
	}

	// Reload service/check definitions and metadata.
	if err := a.loadServices(newCfg, snap); err != nil {
		return fmt.Errorf("Failed reloading services: %s", err)
	}
	if err := a.loadChecks(newCfg, snap); err != nil {
		return fmt.Errorf("Failed reloading checks: %s", err)
	}
	if err := a.loadMetadata(newCfg); err != nil {
		return fmt.Errorf("Failed reloading metadata: %s", err)
	}

	if err := a.reloadWatches(newCfg); err != nil {
		return fmt.Errorf("Failed reloading watches: %v", err)
	}

	// Apply the new per-client HTTP connection limit.
	a.httpConnLimiter.SetConfig(connlimit.Config{
		MaxConnsPerClientIP: newCfg.HTTPMaxConnsPerClient,
	})

	// Push the new config to every DNS server instance.
	for _, s := range a.dnsServers {
		if err := s.ReloadConfig(newCfg); err != nil {
			return fmt.Errorf("Failed reloading dns config : %v", err)
		}
	}

	err := a.reloadEnterprise(newCfg)
	if err != nil {
		return err
	}

	// Forward the reloadable subset of the config to the consul delegate.
	cc := consul.ReloadableConfig{
		RPCRateLimit:          newCfg.RPCRateLimit,
		RPCMaxBurst:           newCfg.RPCMaxBurst,
		RPCMaxConnsPerClient:  newCfg.RPCMaxConnsPerClient,
		ConfigEntryBootstrap:  newCfg.ConfigEntryBootstrap,
		RaftSnapshotThreshold: newCfg.RaftSnapshotThreshold,
		RaftSnapshotInterval:  newCfg.RaftSnapshotInterval,
		HeartbeatTimeout:      newCfg.ConsulRaftHeartbeatTimeout,
		ElectionTimeout:       newCfg.ConsulRaftElectionTimeout,
		RaftTrailingLogs:      newCfg.RaftTrailingLogs,
	}
	if err := a.delegate.ReloadConfig(cc); err != nil {
		return err
	}

	if a.cache.ReloadOptions(newCfg.Cache) {
		a.logger.Info("Cache options have been updated")
	} else {
		a.logger.Debug("Cache options have not been modified")
	}

	// Update filtered metrics
	metrics.UpdateFilter(newCfg.Telemetry.AllowedPrefixes,
		newCfg.Telemetry.BlockedPrefixes)

	a.State.SetDiscardCheckOutput(newCfg.DiscardCheckOutput)

	// Finally run any registered per-subsystem reloaders.
	for _, r := range a.configReloaders {
		if err := r(newCfg); err != nil {
			return err
		}
	}

	return nil
}
2018-04-11 08:52:51 +00:00
2019-09-26 02:55:52 +00:00
// LocalBlockingQuery performs a blocking query in a generic way against
// local agent state that has no RPC or raft to back it. It uses `hash` parameter
// instead of an `index`.
// `alwaysBlock` determines whether we block if the provided hash is empty.
// Callers like the AgentService endpoint will want to return the current result if a hash isn't provided.
// On the other hand, for cache notifications we always want to block. This avoids an empty first response.
func (a *Agent) LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration,
	fn func(ws memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) {

	// If we are not blocking we can skip tracking and allocating - nil WatchSet
	// is still valid to call Add on and will just be a no op.
	var ws memdb.WatchSet
	// Start from a context that is cancelled on agent shutdown so blocked
	// queries unwind when the agent stops.
	var ctx context.Context = &lib.StopChannelContext{StopCh: a.shutdownCh}
	shouldBlock := false

	if alwaysBlock || hash != "" {
		if wait == 0 {
			wait = defaultQueryTime
		}
		// NOTE(review): the cap compares against a literal 10 minutes but
		// assigns maxQueryTime — confirm the literal and the constant are
		// intended to be the same value.
		if wait > 10*time.Minute {
			wait = maxQueryTime
		}
		// Apply a small amount of jitter to the request.
		wait += lib.RandomStagger(wait / 16)
		var cancel func()
		ctx, cancel = context.WithDeadline(ctx, time.Now().Add(wait))
		defer cancel()
		shouldBlock = true
	}

	for {
		// Must reset this every loop in case the Watch set is already closed but
		// hash remains same. In that case we'll need to re-block on ws.Watch()
		// again.
		ws = memdb.NewWatchSet()
		curHash, curResp, err := fn(ws)
		if err != nil {
			return "", curResp, err
		}

		// Return immediately if there is no timeout, the hash is different or the
		// Watch returns true (indicating timeout fired). Note that Watch on a nil
		// WatchSet immediately returns false which would incorrectly cause this to
		// loop and repeat again, however we rely on the invariant that ws == nil
		// IFF timeout == nil in which case the Watch call is never invoked.
		if !shouldBlock || hash != curHash || ws.WatchCtx(ctx) != nil {
			return curHash, curResp, err
		}

		// Watch returned false indicating a change was detected, loop and repeat
		// the callback to load the new value. If agent sync is paused it means
		// local state is currently being bulk-edited e.g. config reload. In this
		// case it's likely that local state just got unloaded and may or may not be
		// reloaded yet. Wait a short amount of time for Sync to resume to ride out
		// typical config reloads.
		if syncPauseCh := a.SyncPausedCh(); syncPauseCh != nil {
			select {
			case <-syncPauseCh:
			case <-ctx.Done():
			}
		}
	}
}
2020-10-05 21:31:35 +00:00
// registerCache types on a.cache.
// This function may only be called once from New.
//
// Note: this function no longer registered all cache-types. Newer cache-types
// that do not depend on Agent are registered from registerCacheTypes.
2018-04-11 08:52:51 +00:00
func ( a * Agent ) registerCache ( ) {
2018-09-06 10:34:28 +00:00
// Note that you should register the _agent_ as the RPC implementation and not
// the a.delegate directly, otherwise tests that rely on overriding RPC
// routing via a.registerEndpoint will not work.
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ConnectCARootName , & cachetype . ConnectCARoot { RPC : a } )
2018-04-17 23:26:58 +00:00
2018-04-30 21:23:49 +00:00
a . cache . RegisterType ( cachetype . ConnectCALeafName , & cachetype . ConnectCALeaf {
2019-01-10 12:46:11 +00:00
RPC : a ,
Cache : a . cache ,
Datacenter : a . config . Datacenter ,
TestOverrideCAChangeInitialDelay : a . config . ConnectTestCALeafRootChangeSpread ,
2018-04-30 21:23:49 +00:00
} )
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . IntentionMatchName , & cachetype . IntentionMatch { RPC : a } )
2018-09-06 10:34:28 +00:00
2021-03-17 19:40:39 +00:00
a . cache . RegisterType ( cachetype . IntentionUpstreamsName , & cachetype . IntentionUpstreams { RPC : a } )
2022-07-14 18:45:51 +00:00
a . cache . RegisterType ( cachetype . IntentionUpstreamsDestinationName , & cachetype . IntentionUpstreamsDestination { RPC : a } )
2021-03-17 19:40:39 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogServicesName , & cachetype . CatalogServices { RPC : a } )
2018-09-06 10:34:28 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . HealthServicesName , & cachetype . HealthServices { RPC : a } )
2018-09-06 10:34:28 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . PreparedQueryName , & cachetype . PreparedQuery { RPC : a } )
2019-02-25 19:06:01 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . NodeServicesName , & cachetype . NodeServices { RPC : a } )
2019-04-23 06:39:02 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ResolvedServiceConfigName , & cachetype . ResolvedServiceConfig { RPC : a } )
2019-06-24 18:11:34 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogListServicesName , & cachetype . CatalogListServices { RPC : a } )
2019-06-24 18:11:34 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogServiceListName , & cachetype . CatalogServiceList { RPC : a } )
2020-01-24 15:04:58 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CatalogDatacentersName , & cachetype . CatalogDatacenters { RPC : a } )
2019-06-20 19:04:39 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . InternalServiceDumpName , & cachetype . InternalServiceDump { RPC : a } )
2019-07-02 03:10:51 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . CompiledDiscoveryChainName , & cachetype . CompiledDiscoveryChain { RPC : a } )
2019-07-02 00:45:42 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . GatewayServicesName , & cachetype . GatewayServices { RPC : a } )
2022-07-14 18:45:51 +00:00
a . cache . RegisterType ( cachetype . ServiceGatewaysName , & cachetype . ServiceGateways { RPC : a } )
2020-04-16 21:00:48 +00:00
2022-05-12 20:34:17 +00:00
a . cache . RegisterType ( cachetype . ConfigEntryListName , & cachetype . ConfigEntryList { RPC : a } )
2019-09-26 02:55:52 +00:00
2020-04-27 23:36:20 +00:00
a . cache . RegisterType ( cachetype . ConfigEntryName , & cachetype . ConfigEntry { RPC : a } )
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . ServiceHTTPChecksName , & cachetype . ServiceHTTPChecks { Agent : a } )
2020-03-09 20:59:02 +00:00
2022-06-01 21:53:52 +00:00
a . cache . RegisterType ( cachetype . TrustBundleReadName , & cachetype . TrustBundle { Client : a . rpcClientPeering } )
2022-06-06 19:20:41 +00:00
a . cache . RegisterType ( cachetype . ExportedPeeredServicesName , & cachetype . ExportedPeeredServices { RPC : a } )
2022-06-01 21:53:52 +00:00
2020-04-14 22:29:30 +00:00
a . cache . RegisterType ( cachetype . FederationStateListMeshGatewaysName ,
& cachetype . FederationStateListMeshGateways { RPC : a } )
2021-10-25 00:38:02 +00:00
2022-05-23 23:57:42 +00:00
a . cache . RegisterType ( cachetype . TrustBundleListName , & cachetype . TrustBundles { Client : a . rpcClientPeering } )
2022-07-13 16:14:57 +00:00
a . cache . RegisterType ( cachetype . PeeredUpstreamsName , & cachetype . PeeredUpstreams { RPC : a } )
2021-10-25 00:38:02 +00:00
a . registerEntCache ( )
2019-09-26 02:55:52 +00:00
}
2020-04-01 20:52:23 +00:00
// LocalState returns the agent's local state
2019-09-26 02:55:52 +00:00
func ( a * Agent ) LocalState ( ) * local . State {
return a . State
}
// rerouteExposedChecks will inject proxy address into check targets
// Future calls to check() will dial the proxy listener
// The agent stateLock MUST be held for this to be called
2019-12-10 02:26:41 +00:00
func ( a * Agent ) rerouteExposedChecks ( serviceID structs . ServiceID , proxyAddr string ) error {
for cid , c := range a . checkHTTPs {
2019-09-26 02:55:52 +00:00
if c . ServiceID != serviceID {
continue
}
2019-12-10 02:26:41 +00:00
port , err := a . listenerPortLocked ( serviceID , cid )
2019-09-26 02:55:52 +00:00
if err != nil {
return err
}
c . ProxyHTTP = httpInjectAddr ( c . HTTP , proxyAddr , port )
2021-05-12 20:51:39 +00:00
hc := a . State . Check ( cid )
hc . ExposedPort = port
2019-09-26 02:55:52 +00:00
}
2019-12-10 02:26:41 +00:00
for cid , c := range a . checkGRPCs {
2019-09-26 02:55:52 +00:00
if c . ServiceID != serviceID {
continue
}
2019-12-10 02:26:41 +00:00
port , err := a . listenerPortLocked ( serviceID , cid )
2019-09-26 02:55:52 +00:00
if err != nil {
return err
}
c . ProxyGRPC = grpcInjectAddr ( c . GRPC , proxyAddr , port )
2021-05-12 20:51:39 +00:00
hc := a . State . Check ( cid )
hc . ExposedPort = port
2019-09-26 02:55:52 +00:00
}
return nil
}
// resetExposedChecks will set Proxy addr in HTTP checks to empty string
// Future calls to check() will use the original target c.HTTP or c.GRPC
// The agent stateLock MUST be held for this to be called
2019-12-10 02:26:41 +00:00
func ( a * Agent ) resetExposedChecks ( serviceID structs . ServiceID ) {
ids := make ( [ ] structs . CheckID , 0 )
for cid , c := range a . checkHTTPs {
2019-09-26 02:55:52 +00:00
if c . ServiceID == serviceID {
c . ProxyHTTP = ""
2021-05-12 20:51:39 +00:00
hc := a . State . Check ( cid )
hc . ExposedPort = 0
2019-12-10 02:26:41 +00:00
ids = append ( ids , cid )
2019-09-26 02:55:52 +00:00
}
}
2019-12-10 02:26:41 +00:00
for cid , c := range a . checkGRPCs {
2019-09-26 02:55:52 +00:00
if c . ServiceID == serviceID {
c . ProxyGRPC = ""
2021-05-12 20:51:39 +00:00
hc := a . State . Check ( cid )
hc . ExposedPort = 0
2019-12-10 02:26:41 +00:00
ids = append ( ids , cid )
2019-09-26 02:55:52 +00:00
}
}
for _ , checkID := range ids {
delete ( a . exposedPorts , listenerPortKey ( serviceID , checkID ) )
}
}
// listenerPort allocates a port from the configured range
// The agent stateLock MUST be held when this is called
2019-12-10 02:26:41 +00:00
func ( a * Agent ) listenerPortLocked ( svcID structs . ServiceID , checkID structs . CheckID ) ( int , error ) {
2019-09-26 02:55:52 +00:00
key := listenerPortKey ( svcID , checkID )
if a . exposedPorts == nil {
a . exposedPorts = make ( map [ string ] int )
}
if p , ok := a . exposedPorts [ key ] ; ok {
return p , nil
}
allocated := make ( map [ int ] bool )
for _ , v := range a . exposedPorts {
allocated [ v ] = true
}
var port int
for i := 0 ; i < a . config . ExposeMaxPort - a . config . ExposeMinPort ; i ++ {
port = a . config . ExposeMinPort + i
if ! allocated [ port ] {
a . exposedPorts [ key ] = port
break
}
}
if port == 0 {
return 0 , fmt . Errorf ( "no ports available to expose '%s'" , checkID )
}
return port , nil
}
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
func ( a * Agent ) proxyDataSources ( ) proxycfg . DataSources {
sources := proxycfg . DataSources {
CARoots : proxycfgglue . CacheCARoots ( a . cache ) ,
CompiledDiscoveryChain : proxycfgglue . CacheCompiledDiscoveryChain ( a . cache ) ,
ConfigEntry : proxycfgglue . CacheConfigEntry ( a . cache ) ,
ConfigEntryList : proxycfgglue . CacheConfigEntryList ( a . cache ) ,
Datacenters : proxycfgglue . CacheDatacenters ( a . cache ) ,
FederationStateListMeshGateways : proxycfgglue . CacheFederationStateListMeshGateways ( a . cache ) ,
GatewayServices : proxycfgglue . CacheGatewayServices ( a . cache ) ,
2022-07-14 18:45:51 +00:00
ServiceGateways : proxycfgglue . CacheServiceGateways ( a . cache ) ,
2022-07-12 10:37:48 +00:00
Health : proxycfgglue . ClientHealth ( a . rpcClientHealth ) ,
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
HTTPChecks : proxycfgglue . CacheHTTPChecks ( a . cache ) ,
Intentions : proxycfgglue . CacheIntentions ( a . cache ) ,
IntentionUpstreams : proxycfgglue . CacheIntentionUpstreams ( a . cache ) ,
2022-07-14 18:45:51 +00:00
IntentionUpstreamsDestination : proxycfgglue . CacheIntentionUpstreamsDestination ( a . cache ) ,
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
InternalServiceDump : proxycfgglue . CacheInternalServiceDump ( a . cache ) ,
LeafCertificate : proxycfgglue . CacheLeafCertificate ( a . cache ) ,
PeeredUpstreams : proxycfgglue . CachePeeredUpstreams ( a . cache ) ,
PreparedQuery : proxycfgglue . CachePrepraredQuery ( a . cache ) ,
ResolvedServiceConfig : proxycfgglue . CacheResolvedServiceConfig ( a . cache ) ,
ServiceList : proxycfgglue . CacheServiceList ( a . cache ) ,
TrustBundle : proxycfgglue . CacheTrustBundle ( a . cache ) ,
TrustBundleList : proxycfgglue . CacheTrustBundleList ( a . cache ) ,
ExportedPeeredServices : proxycfgglue . CacheExportedPeeredServices ( a . cache ) ,
}
2022-07-01 15:18:33 +00:00
if server , ok := a . delegate . ( * consul . Server ) ; ok {
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
deps := proxycfgglue . ServerDataSourceDeps {
2022-07-12 10:34:14 +00:00
Datacenter : a . config . Datacenter ,
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
EventPublisher : a . baseDeps . EventPublisher ,
ViewStore : a . baseDeps . ViewStore ,
Logger : a . logger . Named ( "proxycfg.server-data-sources" ) ,
ACLResolver : a . delegate ,
2022-07-01 15:18:33 +00:00
GetStore : func ( ) proxycfgglue . Store { return server . FSM ( ) . State ( ) } ,
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
}
sources . ConfigEntry = proxycfgglue . ServerConfigEntry ( deps )
sources . ConfigEntryList = proxycfgglue . ServerConfigEntryList ( deps )
2022-07-12 10:34:14 +00:00
sources . CompiledDiscoveryChain = proxycfgglue . ServerCompiledDiscoveryChain ( deps , proxycfgglue . CacheCompiledDiscoveryChain ( a . cache ) )
2022-07-22 10:52:05 +00:00
sources . ExportedPeeredServices = proxycfgglue . ServerExportedPeeredServices ( deps )
2022-07-12 10:43:42 +00:00
sources . FederationStateListMeshGateways = proxycfgglue . ServerFederationStateListMeshGateways ( deps )
2022-07-12 10:41:29 +00:00
sources . GatewayServices = proxycfgglue . ServerGatewayServices ( deps )
2022-07-12 10:37:48 +00:00
sources . Health = proxycfgglue . ServerHealth ( deps , proxycfgglue . ClientHealth ( a . rpcClientHealth ) )
2022-07-01 15:15:49 +00:00
sources . Intentions = proxycfgglue . ServerIntentions ( deps )
2022-07-01 15:18:33 +00:00
sources . IntentionUpstreams = proxycfgglue . ServerIntentionUpstreams ( deps )
2022-07-21 12:38:28 +00:00
sources . PeeredUpstreams = proxycfgglue . ServerPeeredUpstreams ( deps )
2022-07-12 10:35:52 +00:00
sources . ServiceList = proxycfgglue . ServerServiceList ( deps , proxycfgglue . CacheServiceList ( a . cache ) )
2022-07-12 10:39:27 +00:00
sources . TrustBundle = proxycfgglue . ServerTrustBundle ( deps )
sources . TrustBundleList = proxycfgglue . ServerTrustBundleList ( deps )
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
}
a . fillEnterpriseProxyDataSources ( & sources )
return sources
}
2019-12-10 02:26:41 +00:00
func listenerPortKey ( svcID structs . ServiceID , checkID structs . CheckID ) string {
2019-09-26 02:55:52 +00:00
return fmt . Sprintf ( "%s:%s" , svcID , checkID )
}
// grpcInjectAddr injects an ip and port into an address of the form: ip:port[/service]
func grpcInjectAddr(existing string, ip string, port int) string {
	// Two passes over the package-level grpcAddrRE: first rewrite the port,
	// then rewrite the host on the output of the first pass.
	withPort := grpcAddrRE.ReplaceAllString(existing, fmt.Sprintf("${1}:%d${3}", port))
	return grpcAddrRE.ReplaceAllString(withPort, fmt.Sprintf("%s${2}${3}", ip))
}
// httpInjectAddr injects a port then an IP into a URL
func httpInjectAddr(url string, ip string, port int) string {
	// Rewrite the port first via the package-level httpAddrRE...
	withPort := httpAddrRE.ReplaceAllString(url, fmt.Sprintf("${1}${2}:%d${4}${5}", port))
	// ...then the host, bracketing bare IPv6 literals per RFC 3986.
	host := fixIPv6(ip)
	return httpAddrRE.ReplaceAllString(withPort, fmt.Sprintf("${1}%s${3}${4}${5}", host))
}
// fixIPv6 wraps bare IPv6 literals in square brackets as required by
// RFC 3986 so they can be embedded in URLs. Strings with fewer than two
// colons (hostnames, IPv4 addresses, host:port pairs) pass through
// unchanged, and already-bracketed addresses are left alone.
func fixIPv6(address string) string {
	// Anything with fewer than two colons cannot be an IPv6 literal.
	if strings.Count(address, ":") < 2 {
		return address
	}
	out := address
	if !strings.HasSuffix(out, "]") {
		out += "]"
	}
	if !strings.HasPrefix(out, "[") {
		out = "[" + out
	}
	return out
}
2019-09-24 15:04:48 +00:00
// defaultIfEmpty returns the value if not empty otherwise the default value.
func defaultIfEmpty(val, defaultVal string) string {
	if val == "" {
		return defaultVal
	}
	return val
}