connect: remove managed proxies (#6220)

* connect: remove managed proxies implementation and all supporting config options and structs (a sketch of the replacement sidecar registration follows this list)

* connect: remove deprecated ProxyDestination

* command: remove CONNECT_PROXY_TOKEN env var

* agent: remove entire proxyprocess proxy manager

* test: remove all managed proxy tests

* test: remove irrelevant managed proxy note from TestService_ServerTLSConfig

* test: update ContentHash to reflect managed proxy removal

* test: remove deprecated ProxyDestination test

* telemetry: remove managed proxy note

* http: remove /v1/agent/connect/proxy endpoint

* ci: remove deprecated test exclusion

* website: update managed proxies deprecation page to note removal

* website: remove managed proxy configuration API docs

* website: remove managed proxy note from built-in proxy config

* website: add note on removing proxy subdirectory of data_dir
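
With managed proxies gone, the supported replacement is a sidecar proxy (or Connect-native integration) registered alongside the service. A minimal sketch of that registration using the Go API client follows; the service name `web`, the port, and the empty sidecar stanza are illustrative assumptions, not part of this commit.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to the local agent with default settings.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Registering "web" with an empty sidecar_service stanza asks the agent to
	// fill in a default sidecar proxy registration. This replaces the removed
	// connect { proxy { ... } } managed-proxy block.
	reg := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8080,
		Connect: &api.AgentServiceConnect{
			SidecarService: &api.AgentServiceRegistration{},
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}

The proxy process itself now always runs out-of-band (for example `consul connect envoy -sidecar-for web`, or `consul connect proxy -sidecar-for web` for the built-in proxy), since the agent no longer spawns or supervises proxies.
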
Mike Morris 2019-08-09 15:19:30 -04:00 committed by GitHub
parent a12b51e784
commit 88df658243
65 changed files with 226 additions and 8530 deletions


@@ -102,7 +102,7 @@ jobs:
- run: sudo apt-get update && sudo apt-get install -y rsyslog
- run: sudo service rsyslog start
- run: |
PACKAGE_NAMES=$(go list ./... | grep -v github.com/hashicorp/consul/agent/proxyprocess | circleci tests split --split-by=timings --timings-type=classname)
PACKAGE_NAMES=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname)
gotestsum --format=short-verbose --junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- -tags=$GOTAGS -p 3 $PACKAGE_NAMES
- store_test_results:


@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/serf/serf"
@@ -63,18 +62,6 @@ func (a *Agent) initializeACLs() error {
return nil
}
// resolveProxyToken attempts to resolve an ACL ID to a local proxy token.
// If a local proxy isn't found with that token, nil is returned.
func (a *Agent) resolveProxyToken(id string) *local.ManagedProxy {
for _, p := range a.State.Proxies() {
if p.ProxyToken == id {
return p
}
}
return nil
}
// vetServiceRegister makes sure the service registration action is allowed by
// the given token.
func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) error {


@@ -30,7 +30,6 @@ import (
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/proxyprocess"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/systemd"
"github.com/hashicorp/consul/agent/token"
@@ -241,14 +240,10 @@ type Agent struct {
// the configuration directly.
tokens *token.Store
// proxyManager is the proxy process manager for managed Connect proxies.
proxyManager *proxyprocess.Manager
// proxyConfig is the manager for proxy service (Kind = connect-proxy)
// configuration state. This ensures all state needed by a proxy registration
// is maintained in cache and handles pushing updates to that state into XDS
// server to be pushed out to Envoy. This is NOT related to managed proxies
// directly.
// server to be pushed out to Envoy.
proxyConfig *proxycfg.Manager
// serviceManager is the manager for combining local service registrations with
@@ -325,8 +320,6 @@ func LocalConfig(cfg *config.RuntimeConfig) local.Config {
NodeID: cfg.NodeID,
NodeName: cfg.NodeName,
TaggedAddresses: map[string]string{},
ProxyBindMinPort: cfg.ConnectProxyBindMinPort,
ProxyBindMaxPort: cfg.ConnectProxyBindMaxPort,
}
for k, v := range cfg.TaggedAddresses {
lc.TaggedAddresses[k] = v
@@ -334,30 +327,6 @@ func LocalConfig(cfg *config.RuntimeConfig) local.Config {
return lc
}
func (a *Agent) setupProxyManager() error {
acfg, err := a.config.APIConfig(true)
if err != nil {
return fmt.Errorf("[INFO] agent: Connect managed proxies are disabled due to providing an invalid HTTP configuration")
}
a.proxyManager = proxyprocess.NewManager()
a.proxyManager.AllowRoot = a.config.ConnectProxyAllowManagedRoot
a.proxyManager.State = a.State
a.proxyManager.Logger = a.logger
if a.config.DataDir != "" {
// DataDir is required for all non-dev mode agents, but we want
// to allow setting the data dir for demos and so on for the agent,
// so do the check above instead.
a.proxyManager.DataDir = filepath.Join(a.config.DataDir, "proxy")
// Restore from our snapshot (if it exists)
if err := a.proxyManager.Restore(a.proxyManager.SnapshotPath()); err != nil {
a.logger.Printf("[WARN] agent: error restoring proxy state: %s", err)
}
}
a.proxyManager.ProxyEnv = acfg.GenerateEnv()
return nil
}
func (a *Agent) Start() error {
a.stateLock.Lock()
defer a.stateLock.Unlock()
@@ -454,9 +423,6 @@ func (a *Agent) Start() error {
if err := a.loadServices(c); err != nil {
return err
}
if err := a.loadProxies(c); err != nil {
return err
}
if err := a.loadChecks(c, nil); err != nil {
return err
}
@@ -464,17 +430,6 @@ func (a *Agent) Start() error {
return err
}
// create the proxy process manager and start it. This is purposely
// done here after the local state above is loaded in so we can have
// a more accurate initial state view.
if !c.ConnectTestDisableManagedProxies {
if err := a.setupProxyManager(); err != nil {
a.logger.Printf(err.Error())
} else {
go a.proxyManager.Run()
}
}
// Start the proxy config manager.
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
Cache: a.cache,
@@ -1662,24 +1617,6 @@ func (a *Agent) ShutdownAgent() error {
a.proxyConfig.Close()
}
// Stop the proxy process manager
if a.proxyManager != nil {
// If persistence is disabled (which implies DevMode, though only a subset of DevMode setups) then
// don't leave the proxies running since the agent will not be able to
// recover them later.
if a.config.DataDir == "" {
a.logger.Printf("[WARN] agent: dev mode disabled persistence, killing " +
"all proxies since we can't recover them")
if err := a.proxyManager.Kill(); err != nil {
a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err)
}
} else {
if err := a.proxyManager.Close(); err != nil {
a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err)
}
}
}
// Stop the cache background work
if a.cache != nil {
a.cache.Close()
@@ -2017,44 +1954,6 @@ func (a *Agent) purgeService(serviceID string) error {
return nil
}
// persistedProxy is used to wrap a proxy definition and bundle it with a proxy
// token so we can continue to authenticate the running proxy after a restart.
type persistedProxy struct {
ProxyToken string
Proxy *structs.ConnectManagedProxy
// Set to true when the proxy information originated from the agent's configuration
// as opposed to API registration.
FromFile bool
}
// persistProxy saves a proxy definition to a JSON file in the data dir
func (a *Agent) persistProxy(proxy *local.ManagedProxy, FromFile bool) error {
proxyPath := filepath.Join(a.config.DataDir, proxyDir,
stringHash(proxy.Proxy.ProxyService.ID))
wrapped := persistedProxy{
ProxyToken: proxy.ProxyToken,
Proxy: proxy.Proxy,
FromFile: FromFile,
}
encoded, err := json.Marshal(wrapped)
if err != nil {
return err
}
return file.WriteAtomic(proxyPath, encoded)
}
// purgeProxy removes a persisted proxy definition file from the data dir
func (a *Agent) purgeProxy(proxyID string) error {
proxyPath := filepath.Join(a.config.DataDir, proxyDir, stringHash(proxyID))
if _, err := os.Stat(proxyPath); err == nil {
return os.Remove(proxyPath)
}
return nil
}
// persistCheck saves a check definition to the local agent's state directory
func (a *Agent) persistCheck(check *structs.HealthCheck, chkType *structs.CheckType) error {
checkPath := filepath.Join(a.config.DataDir, checksDir, checkIDHash(check.CheckID))
@@ -2305,17 +2204,6 @@ func (a *Agent) removeServiceLocked(serviceID string, persist bool) error {
checkIDs = append(checkIDs, id)
}
// Remove the associated managed proxy if it exists
// This has to be done before purging configuration as we might have issues
// with ACLs otherwise
for proxyID, p := range a.State.Proxies() {
if p.Proxy.TargetServiceID == serviceID {
if err := a.removeProxyLocked(proxyID, true); err != nil {
return err
}
}
}
// Remove service immediately
if err := a.State.RemoveServiceWithChecks(serviceID, checkIDs); err != nil {
a.logger.Printf("[WARN] agent: Failed to deregister service %q: %s", serviceID, err)
@@ -2682,105 +2570,6 @@ func (a *Agent) removeCheckLocked(checkID types.CheckID, persist bool) error {
return nil
}
// addProxyLocked adds a new local Connect Proxy instance to be managed by the agent.
//
// This assumes that the agent's proxyLock is already held
//
// It REQUIRES that the service that is being proxied is already present in the
// local state. Note that this is only used for agent-managed proxies so we can
// ensure that we always make this true. For externally managed and registered
// proxies we explicitly allow the proxy to be registered first to make
// bootstrap ordering of a new service simpler but the same is not true here
// since this is only ever called when setting up a _managed_ proxy which was
// registered as part of a service registration either from config or HTTP API
// call.
//
// The restoredProxyToken argument should only be used when restoring proxy
// definitions from disk; new proxies must leave it blank to get a new token
// assigned. We need to restore from disk to be able to continue authenticating
// running proxies that already had that credential injected.
func (a *Agent) addProxyLocked(proxy *structs.ConnectManagedProxy, persist, FromFile bool,
restoredProxyToken string, source configSource) error {
// Lookup the target service token in state if there is one.
token := a.State.ServiceToken(proxy.TargetServiceID)
// Copy the basic proxy structure so it isn't modified w/ defaults
proxyCopy := *proxy
proxy = &proxyCopy
if err := a.applyProxyDefaults(proxy); err != nil {
return err
}
// Add the proxy to local state first since we may need to assign a port which
// needs to be coordinated under the state lock. AddProxy will generate the
// NodeService for the proxy populated with the allocated (or configured) port
// and an ID, but it doesn't add it to the agent directly since that could
// deadlock and we may need to coordinate adding it and persisting etc.
proxyState, err := a.State.AddProxy(proxy, token, restoredProxyToken)
if err != nil {
return err
}
proxyService := proxyState.Proxy.ProxyService
// Register proxy TCP check. The built-in proxy doesn't listen publicly
// until it's loaded certs so this ensures we won't route traffic until it's
// ready.
proxyCfg, err := a.applyProxyConfigDefaults(proxyState.Proxy)
if err != nil {
return err
}
chkAddr := a.resolveProxyCheckAddress(proxyCfg)
chkTypes := []*structs.CheckType{}
if chkAddr != "" {
bindPort, ok := proxyCfg["bind_port"].(int)
if !ok {
return fmt.Errorf("Cannot convert bind_port=%v to an int for creating TCP Check for address %s", proxyCfg["bind_port"], chkAddr)
}
chkTypes = []*structs.CheckType{
&structs.CheckType{
Name: "Connect Proxy Listening",
TCP: ipaddr.FormatAddressPort(chkAddr, bindPort),
Interval: 10 * time.Second,
},
}
}
err = a.addServiceLocked(proxyService, chkTypes, persist, token, source)
if err != nil {
// Remove the state too
a.State.RemoveProxy(proxyService.ID)
return err
}
// Persist the proxy
if persist && a.config.DataDir != "" {
return a.persistProxy(proxyState, FromFile)
}
return nil
}
// AddProxy adds a new local Connect Proxy instance to be managed by the agent.
//
// It REQUIRES that the service that is being proxied is already present in the
// local state. Note that this is only used for agent-managed proxies so we can
// ensure that we always make this true. For externally managed and registered
// proxies we explicitly allow the proxy to be registered first to make
// bootstrap ordering of a new service simpler but the same is not true here
// since this is only ever called when setting up a _managed_ proxy which was
// registered as part of a service registration either from config or HTTP API
// call.
//
// The restoredProxyToken argument should only be used when restoring proxy
// definitions from disk; new proxies must leave it blank to get a new token
// assigned. We need to restore from disk to be able to continue authenticating
// running proxies that already had that credential injected.
func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist, FromFile bool,
restoredProxyToken string, source configSource) error {
a.stateLock.Lock()
defer a.stateLock.Unlock()
return a.addProxyLocked(proxy, persist, FromFile, restoredProxyToken, source)
}
// resolveProxyCheckAddress returns the best address to use for a TCP check of
// the proxy's public listener. It expects the input to already have default
// values populated by applyProxyConfigDefaults. It may return an empty string
@@ -2819,218 +2608,6 @@ func (a *Agent) resolveProxyCheckAddress(proxyCfg map[string]interface{}) string
return "127.0.0.1"
}
// applyProxyConfigDefaults takes a *structs.ConnectManagedProxy and returns
// its Config map merged with any defaults from the Agent's config. It would be
// nicer if this were defined as a method on structs.ConnectManagedProxy but we
// can't do that because of the import cycle it causes with agent/config.
func (a *Agent) applyProxyConfigDefaults(p *structs.ConnectManagedProxy) (map[string]interface{}, error) {
if p == nil || p.ProxyService == nil {
// Should never happen but protect from panic
return nil, fmt.Errorf("invalid proxy state")
}
// Lookup the target service
target := a.State.Service(p.TargetServiceID)
if target == nil {
// Can happen during deregistration race between proxy and scheduler.
return nil, fmt.Errorf("unknown target service ID: %s", p.TargetServiceID)
}
// Merge globals defaults
config := make(map[string]interface{})
for k, v := range a.config.ConnectProxyDefaultConfig {
if _, ok := config[k]; !ok {
config[k] = v
}
}
// Copy config from the proxy
for k, v := range p.Config {
config[k] = v
}
// Set defaults for anything that is still not specified but required.
// Note that these are not included in the content hash since we expect
// them to be static in general, though some, like the default target service
// port, might not be. In that edge case services can set the value explicitly
// when they re-register, which will be caught.
if _, ok := config["bind_port"]; !ok {
config["bind_port"] = p.ProxyService.Port
}
if _, ok := config["bind_address"]; !ok {
// Default to binding to the same address the agent is configured to
// bind to.
config["bind_address"] = a.config.BindAddr.String()
}
if _, ok := config["local_service_address"]; !ok {
// Default to localhost and the port the service registered with
config["local_service_address"] = fmt.Sprintf("127.0.0.1:%d", target.Port)
}
// Basic type conversions for expected types.
if raw, ok := config["bind_port"]; ok {
switch v := raw.(type) {
case float64:
// Common since HCL/JSON parse as float64
config["bind_port"] = int(v)
// NOTE(mitchellh): No default case since errors and validation
// are handled by the ServiceDefinition.Validate function.
}
}
return config, nil
}
// applyProxyDefaults modifies the given proxy by applying any configured
// defaults, such as the default execution mode, command, etc.
func (a *Agent) applyProxyDefaults(proxy *structs.ConnectManagedProxy) error {
// Set the default exec mode
if proxy.ExecMode == structs.ProxyExecModeUnspecified {
mode, err := structs.NewProxyExecMode(a.config.ConnectProxyDefaultExecMode)
if err != nil {
return err
}
proxy.ExecMode = mode
}
if proxy.ExecMode == structs.ProxyExecModeUnspecified {
proxy.ExecMode = structs.ProxyExecModeDaemon
}
// Set the default command to the globally configured default
if len(proxy.Command) == 0 {
switch proxy.ExecMode {
case structs.ProxyExecModeDaemon:
proxy.Command = a.config.ConnectProxyDefaultDaemonCommand
case structs.ProxyExecModeScript:
proxy.Command = a.config.ConnectProxyDefaultScriptCommand
}
}
// If there is no globally configured default we need to get the
// default command so we can do "consul connect proxy"
if len(proxy.Command) == 0 {
command, err := defaultProxyCommand(a.config)
if err != nil {
return err
}
proxy.Command = command
}
return nil
}
// removeProxyLocked stops and removes a local proxy instance.
//
// It is assumed that this function is called while holding the proxyLock already
func (a *Agent) removeProxyLocked(proxyID string, persist bool) error {
// Validate proxyID
if proxyID == "" {
return fmt.Errorf("proxyID missing")
}
// Remove the proxy from the local state
p, err := a.State.RemoveProxy(proxyID)
if err != nil {
return err
}
// Remove the proxy service as well. The proxy ID is also the ID
// of the service, but we might as well use the service pointer.
if err := a.removeServiceLocked(p.Proxy.ProxyService.ID, persist); err != nil {
return err
}
if persist && a.config.DataDir != "" {
return a.purgeProxy(proxyID)
}
return nil
}
// RemoveProxy stops and removes a local proxy instance.
func (a *Agent) RemoveProxy(proxyID string, persist bool) error {
a.stateLock.Lock()
defer a.stateLock.Unlock()
return a.removeProxyLocked(proxyID, persist)
}
// verifyProxyToken takes a token and attempts to verify it against the
// targetService name. If targetProxy is specified, then the local proxy token
// must exactly match the given proxy ID.
//
// The given token may be a local-only proxy token or it may be an ACL token. We
// will attempt to verify the local proxy token first.
//
// The effective ACL token is returned along with a boolean which is true if the
// match was against a proxy token rather than an ACL token, and any error. In
// the case the token matches a proxy token, then the ACL token used to register
// that proxy's target service is returned for use in any RPC calls the proxy
// needs to make on behalf of that service. If the token was an ACL token
// already then it is always returned. Provided the error is nil, a valid ACL token
// is always returned.
func (a *Agent) verifyProxyToken(token, targetService,
targetProxy string) (string, bool, error) {
// If we specify a target proxy, we look up that proxy directly. Otherwise,
// we resolve with any proxy we can find.
var proxy *local.ManagedProxy
if targetProxy != "" {
proxy = a.State.Proxy(targetProxy)
if proxy == nil {
return "", false, fmt.Errorf("unknown proxy service ID: %q", targetProxy)
}
// If the token DOESN'T match, then we reset the proxy which will
// cause the logic below to fall back to normal ACLs. Otherwise,
// we keep the proxy set because we also have to verify that the
// target service matches on the proxy.
if token != proxy.ProxyToken {
proxy = nil
}
} else {
proxy = a.resolveProxyToken(token)
}
// The existence of a token isn't enough, we also need to verify
// that the service name of the matching proxy matches our target
// service.
if proxy != nil {
// Get the target service since we only have the name. The nil
// check below should never be true since a proxy token always
// represents the existence of a local service.
target := a.State.Service(proxy.Proxy.TargetServiceID)
if target == nil {
return "", false, fmt.Errorf("proxy target service not found: %q",
proxy.Proxy.TargetServiceID)
}
if target.Service != targetService {
return "", false, acl.ErrPermissionDenied
}
// Resolve the actual ACL token used to register the proxy/service and
// return that for use in RPC calls.
return a.State.ServiceToken(proxy.Proxy.TargetServiceID), true, nil
}
// If it doesn't match, we have to do a full token resolution. The required
// permission for any proxy-related endpoint is service:write, since
// to register a proxy you require that permission and sensitive data
// is usually present in the configuration.
rule, err := a.resolveToken(token)
if err != nil {
return "", false, err
}
if rule != nil && !rule.ServiceWrite(targetService, nil) {
return "", false, acl.ErrPermissionDenied
}
return token, false, nil
}
func (a *Agent) cancelCheckMonitors(checkID types.CheckID) {
// Stop any monitors
delete(a.checkReapAfter, checkID)
@@ -3455,107 +3032,6 @@ func (a *Agent) unloadChecks() error {
return nil
}
// loadPersistedProxies will load connect proxy definitions from their
// persisted state on disk and return a map of them keyed by target service ID.
//
// This does not add them to the local state.
func (a *Agent) loadPersistedProxies() (map[string]persistedProxy, error) {
persistedProxies := make(map[string]persistedProxy)
proxyDir := filepath.Join(a.config.DataDir, proxyDir)
files, err := ioutil.ReadDir(proxyDir)
if err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("Failed reading proxies dir %q: %s", proxyDir, err)
}
}
for _, fi := range files {
// Skip all dirs
if fi.IsDir() {
continue
}
// Error out on any partially written temporary files
if strings.HasSuffix(fi.Name(), "tmp") {
return nil, fmt.Errorf("Ignoring temporary proxy file %v", fi.Name())
}
// Open the file for reading
file := filepath.Join(proxyDir, fi.Name())
fh, err := os.Open(file)
if err != nil {
return nil, fmt.Errorf("failed opening proxy file %q: %s", file, err)
}
// Read the contents into a buffer
buf, err := ioutil.ReadAll(fh)
fh.Close()
if err != nil {
return nil, fmt.Errorf("failed reading proxy file %q: %s", file, err)
}
// Try decoding the proxy definition
var p persistedProxy
if err := json.Unmarshal(buf, &p); err != nil {
return nil, fmt.Errorf("Failed decoding proxy file %q: %s", file, err)
}
svcID := p.Proxy.TargetServiceID
persistedProxies[svcID] = p
}
return persistedProxies, nil
}
// loadProxies will load connect proxy definitions from configuration and
// persisted definitions on disk, and load them into the local agent.
func (a *Agent) loadProxies(conf *config.RuntimeConfig) error {
persistedProxies, persistenceErr := a.loadPersistedProxies()
for _, svc := range conf.Services {
if svc.Connect != nil {
proxy, err := svc.ConnectManagedProxy()
if err != nil {
return fmt.Errorf("failed adding proxy: %s", err)
}
if proxy == nil {
continue
}
restoredToken := ""
if persisted, ok := persistedProxies[proxy.TargetServiceID]; ok {
restoredToken = persisted.ProxyToken
}
if err := a.addProxyLocked(proxy, true, true, restoredToken, ConfigSourceLocal); err != nil {
return fmt.Errorf("failed adding proxy: %s", err)
}
}
}
for _, persisted := range persistedProxies {
proxyID := persisted.Proxy.ProxyService.ID
if persisted.FromFile && a.State.Proxy(proxyID) == nil {
// Purge proxies that were configured previously but are no longer in the config
a.logger.Printf("[DEBUG] agent: purging stale persisted proxy %q", proxyID)
if err := a.purgeProxy(proxyID); err != nil {
return fmt.Errorf("failed purging proxy %q: %v", proxyID, err)
}
} else if !persisted.FromFile {
if a.State.Proxy(proxyID) == nil {
a.logger.Printf("[DEBUG] agent: restored proxy definition %q", proxyID)
if err := a.addProxyLocked(persisted.Proxy, false, false, persisted.ProxyToken, ConfigSourceLocal); err != nil {
return fmt.Errorf("failed adding proxy %q: %v", proxyID, err)
}
} else {
a.logger.Printf("[WARN] agent: proxy definition %q was overwritten by a proxy definition within a config file", proxyID)
}
}
}
return persistenceErr
}
type persistedTokens struct {
Replication string `json:"replication,omitempty"`
AgentMaster string `json:"agent_master,omitempty"`
@@ -3640,16 +3116,6 @@ func (a *Agent) loadTokens(conf *config.RuntimeConfig) error {
return persistenceErr
}
// unloadProxies will deregister all proxies known to the local agent.
func (a *Agent) unloadProxies() error {
for id := range a.State.Proxies() {
if err := a.removeProxyLocked(id, false); err != nil {
return fmt.Errorf("Failed deregistering proxy '%s': %s", id, err)
}
}
return nil
}
// snapshotCheckState is used to snapshot the current state of the health
// checks. This is done before we reload our checks, so that we can properly
// restore into the same state.
@@ -3785,9 +3251,6 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error {
// First unload all checks, services, and metadata. This lets us begin the reload
// with a clean slate.
if err := a.unloadProxies(); err != nil {
return fmt.Errorf("Failed unloading proxies: %s", err)
}
if err := a.unloadServices(); err != nil {
return fmt.Errorf("Failed unloading services: %s", err)
}
@@ -3809,9 +3272,6 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error {
if err := a.loadServices(newCfg); err != nil {
return fmt.Errorf("Failed reloading services: %s", err)
}
if err := a.loadProxies(newCfg); err != nil {
return fmt.Errorf("Failed reloading proxies: %s", err)
}
if err := a.loadChecks(newCfg, snap); err != nil {
return fmt.Errorf("Failed reloading checks: %s", err)
}
@@ -3978,21 +3438,3 @@ func (a *Agent) registerCache() {
RefreshTimeout: 10 * time.Minute,
})
}
// defaultProxyCommand returns the default Connect managed proxy command.
func defaultProxyCommand(agentCfg *config.RuntimeConfig) ([]string, error) {
// Get the path to the current executable. This is cached once by the
// library so this is effectively just a variable read.
execPath, err := os.Executable()
if err != nil {
return nil, err
}
// "consul connect proxy" default value for managed daemon proxy
cmd := []string{execPath, "connect", "proxy"}
if agentCfg != nil && agentCfg.LogLevel != "INFO" {
cmd = append(cmd, "-log-level", agentCfg.LogLevel)
}
return cmd, nil
}
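
Since the persistence, restore, and reload paths above are gone, nothing reads the managed-proxy snapshot directory anymore; per the website note added in this commit, the proxy subdirectory of data_dir can be deleted after upgrading. A hedged sketch of that cleanup follows; the data_dir path shown is an assumption for illustration.

package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Illustrative path; substitute the agent's configured data_dir.
	dataDir := "/opt/consul/data"

	// The agent neither writes nor restores managed-proxy snapshots now,
	// so the leftover "proxy" subdirectory is dead state and safe to remove.
	if err := os.RemoveAll(filepath.Join(dataDir, "proxy")); err != nil {
		log.Fatalf("failed removing stale managed-proxy state: %s", err)
	}
}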


@@ -2,10 +2,8 @@ package agent
import (
"encoding/json"
"errors"
"fmt"
"log"
"net"
"net/http"
"path/filepath"
"strconv"
@@ -20,7 +18,6 @@ import (
"github.com/hashicorp/consul/acl"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/debug"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs"
token_store "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api"
@@ -157,7 +154,7 @@ func (s *HTTPServer) AgentReload(resp http.ResponseWriter, req *http.Request) (i
}
}
func buildAgentService(s *structs.NodeService, proxies map[string]*local.ManagedProxy) api.AgentService {
func buildAgentService(s *structs.NodeService) api.AgentService {
weights := api.AgentWeights{Passing: 1, Warning: 1}
if s.Weights != nil {
if s.Weights.Passing > 0 {
@@ -200,24 +197,10 @@ func buildAgentService(s *structs.NodeService, proxies map[string]*local.Managed
s.Kind == structs.ServiceKindMeshGateway {
as.Proxy = s.Proxy.ToAPI()
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
// Also set the deprecated ProxyDestination
as.ProxyDestination = as.Proxy.DestinationServiceName
}
// Attach Connect configs if they exist. We use the actual proxy state since
// that may have had defaults filled in compared to the config that was
// provided with the service as stored in the NodeService here.
if proxy, ok := proxies[s.ID+"-proxy"]; ok {
as.Connect = &api.AgentServiceConnect{
Proxy: &api.AgentServiceConnectProxy{
ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()),
Command: proxy.Proxy.Command,
Config: proxy.Proxy.Config,
Upstreams: proxy.Proxy.Upstreams.ToAPI(),
},
}
} else if s.Connect.Native {
// Attach Connect configs if they exist.
if s.Connect.Native {
as.Connect = &api.AgentServiceConnect{
Native: true,
}
@@ -238,8 +221,6 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
return nil, err
}
proxies := s.agent.State.Proxies()
// Convert into api.AgentService since that includes Connect config but so far
// NodeService doesn't need to internally. They are otherwise identical since
// that is the struct used in the client for reading the one we output here
@@ -248,7 +229,7 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
// Use empty list instead of nil
for id, s := range services {
agentService := buildAgentService(s, proxies)
agentService := buildAgentService(s)
agentSvcs[id] = &agentService
}
@@ -268,68 +249,6 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (
// Get the proxy ID. Note that this is the ID of a proxy's service instance.
id := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/")
// DEPRECATED(managed-proxies) - remove this whole hack.
//
// Support managed proxies until they are removed entirely. Since built-in
// proxy will now use this endpoint, in order to not break managed proxies in
// the interim until they are removed, we need to mirror the default-setting
// behavior they had. Rather than thread that through this whole method as
// special cases that need to be unwound later (and duplicate logic in the
// proxy config endpoint) just defer to that and then translate the response.
if managedProxy := s.agent.State.Proxy(id); managedProxy != nil {
// This is for a managed proxy, use the old endpoint's behavior
req.URL.Path = "/v1/agent/connect/proxy/" + id
obj, err := s.AgentConnectProxyConfig(resp, req)
if err != nil {
return obj, err
}
proxyCfg, ok := obj.(*api.ConnectProxyConfig)
if !ok {
return nil, errors.New("internal error")
}
// These are all set by defaults so type checks are just sanity checks that
// should never fail.
port, ok := proxyCfg.Config["bind_port"].(int)
if !ok || port < 1 {
return nil, errors.New("invalid proxy config")
}
addr, ok := proxyCfg.Config["bind_address"].(string)
if !ok || addr == "" {
return nil, errors.New("invalid proxy config")
}
localAddr, ok := proxyCfg.Config["local_service_address"].(string)
if !ok || localAddr == "" {
return nil, errors.New("invalid proxy config")
}
// Old local_service_address was a host:port
localAddress, localPortRaw, err := net.SplitHostPort(localAddr)
if err != nil {
return nil, err
}
localPort, err := strconv.Atoi(localPortRaw)
if err != nil {
return nil, err
}
reply := &api.AgentService{
Kind: api.ServiceKindConnectProxy,
ID: proxyCfg.ProxyServiceID,
Service: managedProxy.Proxy.ProxyService.Service,
Port: port,
Address: addr,
ContentHash: proxyCfg.ContentHash,
Proxy: &api.AgentServiceConnectProxyConfig{
DestinationServiceName: proxyCfg.TargetServiceName,
DestinationServiceID: proxyCfg.TargetServiceID,
LocalServiceAddress: localAddress,
LocalServicePort: localPort,
Config: proxyCfg.Config,
Upstreams: proxyCfg.Upstreams,
},
}
return reply, nil
}
// Maybe block
var queryOpts structs.QueryOptions
if parseWait(resp, req, &queryOpts) {
@@ -794,14 +713,13 @@ func (s *HTTPServer) AgentHealthServiceByID(resp http.ResponseWriter, req *http.
return nil, &BadRequestError{Reason: "Missing serviceID"}
}
services := s.agent.State.Services()
proxies := s.agent.State.Proxies()
for _, service := range services {
if service.ID == serviceID {
code, status, healthChecks := agentHealthService(serviceID, s)
if returnTextPlain(req) {
return status, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "text/plain"}
}
serviceInfo := buildAgentService(service, proxies)
serviceInfo := buildAgentService(service)
result := &api.AgentServiceChecksInfo{
AggregatedStatus: status,
Checks: healthChecks,
@@ -832,11 +750,10 @@ func (s *HTTPServer) AgentHealthServiceByName(resp http.ResponseWriter, req *htt
status := fmt.Sprintf("ServiceName %s Not Found", serviceName)
services := s.agent.State.Services()
result := make([]api.AgentServiceChecksInfo, 0, 16)
proxies := s.agent.State.Proxies()
for _, service := range services {
if service.Service == serviceName {
scode, sstatus, healthChecks := agentHealthService(service.ID, s)
serviceInfo := buildAgentService(service, proxies)
serviceInfo := buildAgentService(service)
res := api.AgentServiceChecksInfo{
AggregatedStatus: sstatus,
Checks: healthChecks,
@@ -875,8 +792,6 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re
// and why we should get rid of it.
lib.TranslateKeys(rawMap, map[string]string{
"enable_tag_override": "EnableTagOverride",
// Managed Proxy Config
"exec_mode": "ExecMode",
// Proxy Upstreams
"destination_name": "DestinationName",
"destination_type": "DestinationType",
@@ -906,8 +821,7 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re
// Same exceptions as above, but for a nested sidecar_service note we use
// the canonical form SidecarService since that is translated by the time
// the lookup here happens. Note that sidecar service doesn't support
// managed proxies (connect.proxy).
// the lookup here happens.
"Connect.SidecarService.Meta": "",
"Connect.SidecarService.Proxy.Config": "",
"Connect.SidecarService.Proxy.Upstreams.config": "",
@@ -1036,30 +950,10 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re
ns.Connect.SidecarService = nil
}
// Get any proxy registrations
proxy, err := args.ConnectManagedProxy()
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, err.Error())
return nil, nil
}
// If we have a proxy, verify that we're allowed to add a proxy via the API
if proxy != nil && !s.agent.config.ConnectProxyAllowManagedAPIRegistration {
return nil, &BadRequestError{
Reason: "Managed proxy registration via the API is disallowed."}
}
// Add the service.
if err := s.agent.AddService(ns, chkTypes, true, token, ConfigSourceRemote); err != nil {
return nil, err
}
// Add proxy (which will add proxy service so do it before we trigger sync)
if proxy != nil {
if err := s.agent.AddProxy(proxy, true, false, "", ConfigSourceRemote); err != nil {
return nil, err
}
}
// Add sidecar.
if sidecar != nil {
if err := s.agent.AddService(sidecar, sidecarChecks, true, sidecarToken, ConfigSourceRemote); err != nil {
@@ -1080,14 +974,6 @@ func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.
return nil, err
}
// Verify this isn't a proxy
if s.agent.State.Proxy(serviceID) != nil {
return nil, &BadRequestError{
Reason: "Managed proxy service cannot be deregistered directly. " +
"Deregister the service that has a managed proxy to automatically " +
"deregister the managed proxy itself."}
}
if err := s.agent.RemoveService(serviceID, true); err != nil {
return nil, err
}
@@ -1405,26 +1291,12 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.
var qOpts structs.QueryOptions
// Store DC in the ConnectCALeafRequest but query opts separately
// Don't resolve a proxy token to a real token; that will be
// done with a call to verifyProxyToken later along with
// other security-relevant checks.
if done := s.parseWithoutResolvingProxyToken(resp, req, &args.Datacenter, &qOpts); done {
if done := s.parse(resp, req, &args.Datacenter, &qOpts); done {
return nil, nil
}
args.MinQueryIndex = qOpts.MinQueryIndex
args.MaxQueryTime = qOpts.MaxQueryTime
// Verify the proxy token. This will check both the local proxy token
// as well as the ACL if the token isn't local. The checks done in
// verifyProxyToken are still relevant because a leaf cert can be cached:
// verifying that the proxy token matches the service ID, or that a real
// ACL token is still valid and has ServiceWrite, is necessary, or else
// the cached cert is potentially unprotected.
effectiveToken, _, err := s.agent.verifyProxyToken(qOpts.Token, serviceName, "")
if err != nil {
return nil, err
}
args.Token = effectiveToken
args.Token = qOpts.Token
raw, m, err := s.agent.cache.Get(cachetype.ConnectCALeafName, &args)
if err != nil {
@@ -1442,133 +1314,6 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.
return reply, nil
}
// GET /v1/agent/connect/proxy/:proxy_service_id
//
// Returns the local proxy config for the identified proxy. Requires token=
// param with the correct local ProxyToken (not ACL token).
func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Get the proxy ID. Note that this is the ID of a proxy's service instance.
id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/proxy/")
// Maybe block
var queryOpts structs.QueryOptions
if parseWait(resp, req, &queryOpts) {
// parseWait returns an error itself
return nil, nil
}
// Parse the token - don't resolve a proxy token to a real token;
// that will be done with a call to verifyProxyToken later along with
// other security-relevant checks.
var token string
s.parseTokenWithoutResolvingProxyToken(req, &token)
// Parse hash specially since it's only this endpoint that uses it currently.
// Eventually this should happen in parseWait and end up in QueryOptions but I
// didn't want to make very general changes right away.
hash := req.URL.Query().Get("hash")
return s.agentLocalBlockingQuery(resp, hash, &queryOpts,
func(ws memdb.WatchSet) (string, interface{}, error) {
// Retrieve the proxy specified
proxy := s.agent.State.Proxy(id)
if proxy == nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprintf(resp, "unknown proxy service ID: %s", id)
return "", nil, nil
}
// Lookup the target service as a convenience
target := s.agent.State.Service(proxy.Proxy.TargetServiceID)
if target == nil {
// Not found since this endpoint is only useful for agent-managed proxies, so
// a missing service means it was deregistered in a race with this call.
resp.WriteHeader(http.StatusNotFound)
fmt.Fprintf(resp, "unknown target service ID: %s", proxy.Proxy.TargetServiceID)
return "", nil, nil
}
// Validate the ACL token - because this endpoint uses data local to a single
// agent, this function is responsible for all enforcement regarding
// protection of the configuration. verifyProxyToken will match the proxy's
// token to the correct service or, in the case of being provided a real ACL
// token, it will ensure that the requester has ServiceWrite privileges
// for this service.
_, isProxyToken, err := s.agent.verifyProxyToken(token, target.Service, id)
if err != nil {
return "", nil, err
}
// Watch the proxy for changes
ws.Add(proxy.WatchCh)
hash, err := hashstructure.Hash(proxy.Proxy, nil)
if err != nil {
return "", nil, err
}
contentHash := fmt.Sprintf("%x", hash)
// Set defaults
config, err := s.agent.applyProxyConfigDefaults(proxy.Proxy)
if err != nil {
return "", nil, err
}
// Only merge in telemetry config from agent if the requester is
// authorized with a proxy token. This prevents us leaking potentially
// sensitive config like a Circonus API token via a public endpoint. Proxy
// tokens are only ever generated in-memory and passed via ENV to a child
// proxy process so potential for abuse here seems small. This endpoint in
// general is only useful for managed proxies now so it should _always_ be
// true that auth is via a proxy token, but it would be inconvenient for
// testing if we locked it down so strictly.
if isProxyToken {
// Add telemetry config. Copy the global config so we can customize the
// prefix.
telemetryCfg := s.agent.config.Telemetry
telemetryCfg.MetricsPrefix = telemetryCfg.MetricsPrefix + ".proxy." + target.ID
// First see if the user has specified telemetry
if userRaw, ok := config["telemetry"]; ok {
// User specified something, see if it is compatible with agent
// telemetry config:
var uCfg lib.TelemetryConfig
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &uCfg,
// Make sure that if the user passes something that isn't just a
// simple override of a valid TelemetryConfig that we fail so that we
// don't clobber their custom config.
ErrorUnused: true,
})
if err == nil {
if err = dec.Decode(userRaw); err == nil {
// It did decode! Merge any unspecified fields from agent config.
uCfg.MergeDefaults(&telemetryCfg)
config["telemetry"] = uCfg
}
}
// Failed to decode, just keep user's config["telemetry"] verbatim
// with no agent merge.
} else {
// Add agent telemetry config.
config["telemetry"] = telemetryCfg
}
}
reply := &api.ConnectProxyConfig{
ProxyServiceID: proxy.Proxy.ProxyService.ID,
TargetServiceID: target.ID,
TargetServiceName: target.Service,
ContentHash: contentHash,
ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()),
Command: proxy.Proxy.Command,
Config: config,
Upstreams: proxy.Proxy.Upstreams.ToAPI(),
}
return contentHash, reply, nil
})
}
type agentLocalBlockingFunc func(ws memdb.WatchSet) (string, interface{}, error)
// agentLocalBlockingQuery performs a blocking query in a generic way against

File diff suppressed because it is too large


@@ -1911,187 +1911,6 @@ func TestAgent_PurgeServiceOnDuplicate(t *testing.T) {
}
}
func TestAgent_PersistProxy(t *testing.T) {
t.Parallel()
dataDir := testutil.TempDir(t, "agent") // we manage the data dir
cfg := `
server = false
bootstrap = false
data_dir = "` + dataDir + `"
`
a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
a.Start(t)
defer os.RemoveAll(dataDir)
defer a.Shutdown()
require := require.New(t)
assert := assert.New(t)
// Add a service to proxy (precondition for AddProxy)
svc1 := &structs.NodeService{
ID: "redis",
Service: "redis",
Tags: []string{"foo"},
Port: 8000,
}
require.NoError(a.AddService(svc1, nil, true, "", ConfigSourceLocal))
// Add a proxy for it
proxy := &structs.ConnectManagedProxy{
TargetServiceID: svc1.ID,
Command: []string{"/bin/sleep", "3600"},
}
file := filepath.Join(a.Config.DataDir, proxyDir, stringHash("redis-proxy"))
// Proxy is not persisted unless requested
require.NoError(a.AddProxy(proxy, false, false, "", ConfigSourceLocal))
_, err := os.Stat(file)
require.Error(err, "proxy should not be persisted")
// Proxy is persisted if requested
require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))
_, err = os.Stat(file)
require.NoError(err, "proxy should be persisted")
content, err := ioutil.ReadFile(file)
require.NoError(err)
var gotProxy persistedProxy
require.NoError(json.Unmarshal(content, &gotProxy))
assert.Equal(proxy.Command, gotProxy.Proxy.Command)
assert.Len(gotProxy.ProxyToken, 36) // sanity check for UUID
// Updates service definition on disk
proxy.Config = map[string]interface{}{
"foo": "bar",
}
require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))
content, err = ioutil.ReadFile(file)
require.NoError(err)
require.NoError(json.Unmarshal(content, &gotProxy))
assert.Equal(gotProxy.Proxy.Command, proxy.Command)
assert.Equal(gotProxy.Proxy.Config, proxy.Config)
assert.Len(gotProxy.ProxyToken, 36) // sanity check for UUID
a.Shutdown()
// Should load it back during later start
a2 := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
a2.Start(t)
defer a2.Shutdown()
restored := a2.State.Proxy("redis-proxy")
require.NotNil(restored)
assert.Equal(gotProxy.ProxyToken, restored.ProxyToken)
// Ensure the port that was auto picked at random is the same again
assert.Equal(gotProxy.Proxy.ProxyService.Port, restored.Proxy.ProxyService.Port)
assert.Equal(gotProxy.Proxy.Command, restored.Proxy.Command)
}
func TestAgent_PurgeProxy(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
require := require.New(t)
// Add a service to proxy (precondition for AddProxy)
svc1 := &structs.NodeService{
ID: "redis",
Service: "redis",
Tags: []string{"foo"},
Port: 8000,
}
require.NoError(a.AddService(svc1, nil, true, "", ConfigSourceLocal))
// Add a proxy for it
proxy := &structs.ConnectManagedProxy{
TargetServiceID: svc1.ID,
Command: []string{"/bin/sleep", "3600"},
}
proxyID := "redis-proxy"
require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))
file := filepath.Join(a.Config.DataDir, proxyDir, stringHash("redis-proxy"))
// Not removed
require.NoError(a.RemoveProxy(proxyID, false))
_, err := os.Stat(file)
require.NoError(err, "should not be removed")
// Re-add the proxy
require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))
// Removed
require.NoError(a.RemoveProxy(proxyID, true))
_, err = os.Stat(file)
require.Error(err, "should be removed")
}
func TestAgent_PurgeProxyOnDuplicate(t *testing.T) {
t.Parallel()
dataDir := testutil.TempDir(t, "agent") // we manage the data dir
cfg := `
data_dir = "` + dataDir + `"
server = false
bootstrap = false
`
a := &TestAgent{Name: t.Name(), HCL: cfg, DataDir: dataDir}
a.Start(t)
defer a.Shutdown()
defer os.RemoveAll(dataDir)
require := require.New(t)
// Add a service to proxy (precondition for AddProxy)
svc1 := &structs.NodeService{
ID: "redis",
Service: "redis",
Tags: []string{"foo"},
Port: 8000,
}
require.NoError(a.AddService(svc1, nil, true, "", ConfigSourceLocal))
// Add a proxy for it
proxy := &structs.ConnectManagedProxy{
TargetServiceID: svc1.ID,
Command: []string{"/bin/sleep", "3600"},
}
proxyID := "redis-proxy"
require.NoError(a.AddProxy(proxy, true, false, "", ConfigSourceLocal))
a.Shutdown()
// Try bringing the agent back up with the service already
// existing in the config
a2 := &TestAgent{Name: t.Name() + "-a2", HCL: cfg + `
service = {
id = "redis"
name = "redis"
tags = ["bar"]
port = 9000
connect {
proxy {
command = ["/bin/sleep", "3600"]
}
}
}
`, DataDir: dataDir}
a2.Start(t)
defer a2.Shutdown()
file := filepath.Join(a.Config.DataDir, proxyDir, stringHash(proxyID))
_, err := os.Stat(file)
require.NoError(err, "Config File based proxies should be persisted too")
result := a2.State.Proxy(proxyID)
require.NotNil(result)
require.Equal(proxy.Command, result.Proxy.Command)
}
func TestAgent_PersistCheck(t *testing.T) {
t.Parallel()
dataDir := testutil.TempDir(t, "agent") // we manage the data dir
@@ -2571,96 +2390,6 @@ func TestAgent_unloadServices(t *testing.T) {
}
}
func TestAgent_loadProxies(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), `
service = {
id = "rabbitmq"
name = "rabbitmq"
port = 5672
token = "abc123"
connect {
proxy {
config {
bind_port = 1234
}
}
}
}
`)
defer a.Shutdown()
services := a.State.Services()
if _, ok := services["rabbitmq"]; !ok {
t.Fatalf("missing service")
}
if token := a.State.ServiceToken("rabbitmq"); token != "abc123" {
t.Fatalf("bad: %s", token)
}
if _, ok := services["rabbitmq-proxy"]; !ok {
t.Fatalf("missing proxy service")
}
if token := a.State.ServiceToken("rabbitmq-proxy"); token != "abc123" {
t.Fatalf("bad: %s", token)
}
proxies := a.State.Proxies()
if _, ok := proxies["rabbitmq-proxy"]; !ok {
t.Fatalf("missing proxy")
}
}
func TestAgent_loadProxies_nilProxy(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), `
service = {
id = "rabbitmq"
name = "rabbitmq"
port = 5672
token = "abc123"
connect {
}
}
`)
defer a.Shutdown()
services := a.State.Services()
require.Contains(t, services, "rabbitmq")
require.Equal(t, "abc123", a.State.ServiceToken("rabbitmq"))
require.NotContains(t, services, "rabbitmq-proxy")
require.Empty(t, a.State.Proxies())
}
func TestAgent_unloadProxies(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), `
service = {
id = "rabbitmq"
name = "rabbitmq"
port = 5672
token = "abc123"
connect {
proxy {
config {
bind_port = 1234
}
}
}
}
`)
defer a.Shutdown()
// Sanity check it's there
require.NotNil(t, a.State.Proxy("rabbitmq-proxy"))
// Unload all proxies
if err := a.unloadProxies(); err != nil {
t.Fatalf("err: %s", err)
}
if len(a.State.Proxies()) != 0 {
t.Fatalf("should have unloaded proxies")
}
}
func TestAgent_Service_MaintenanceMode(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
@@ -3312,358 +3041,6 @@ func TestAgent_reloadWatchesHTTPS(t *testing.T) {
}
}
func TestAgent_AddProxy(t *testing.T) {
t.Parallel()
tests := []struct {
desc string
proxy, wantProxy *structs.ConnectManagedProxy
wantTCPCheck string
wantErr bool
}{
{
desc: "basic proxy adding, unregistered service",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
},
TargetServiceID: "db", // non-existent service.
},
// Target service must be registered.
wantErr: true,
},
{
desc: "basic proxy adding, registered service",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
},
TargetServiceID: "web",
},
// Proxy will inherit agent's 0.0.0.0 bind address but we can't check that
// so we should default to localhost in that case.
wantTCPCheck: "127.0.0.1:20000",
wantErr: false,
},
{
desc: "default global exec mode",
proxy: &structs.ConnectManagedProxy{
Command: []string{"consul", "connect", "proxy"},
TargetServiceID: "web",
},
wantProxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeScript,
Command: []string{"consul", "connect", "proxy"},
TargetServiceID: "web",
},
wantTCPCheck: "127.0.0.1:20000",
wantErr: false,
},
{
desc: "default daemon command",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
TargetServiceID: "web",
},
wantProxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"foo", "bar"},
TargetServiceID: "web",
},
wantTCPCheck: "127.0.0.1:20000",
wantErr: false,
},
{
desc: "default script command",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeScript,
TargetServiceID: "web",
},
wantProxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeScript,
Command: []string{"bar", "foo"},
TargetServiceID: "web",
},
wantTCPCheck: "127.0.0.1:20000",
wantErr: false,
},
{
desc: "managed proxy with custom bind port",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
"bind_address": "127.10.10.10",
"bind_port": 1234,
},
TargetServiceID: "web",
},
wantTCPCheck: "127.10.10.10:1234",
wantErr: false,
},
{
// This test is necessary since JSON and HCL both will parse
// numbers as a float64.
desc: "managed proxy with custom bind port (float64)",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
"bind_address": "127.10.10.10",
"bind_port": float64(1234),
},
TargetServiceID: "web",
},
wantTCPCheck: "127.10.10.10:1234",
wantErr: false,
},
{
desc: "managed proxy with overridden but unspecified ipv6 bind address",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
"bind_address": "[::]",
},
TargetServiceID: "web",
},
wantTCPCheck: "127.0.0.1:20000",
wantErr: false,
},
{
desc: "managed proxy with overridden check address",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
"tcp_check_address": "127.20.20.20",
},
TargetServiceID: "web",
},
wantTCPCheck: "127.20.20.20:20000",
wantErr: false,
},
{
desc: "managed proxy with disabled check",
proxy: &structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
"disable_tcp_check": true,
},
TargetServiceID: "web",
},
wantTCPCheck: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
require := require.New(t)
a := NewTestAgent(t, t.Name(), `
node_name = "node1"
# Explicit test because proxies inheriting this value must have a health
# check on a different IP.
bind_addr = "0.0.0.0"
connect {
proxy_defaults {
exec_mode = "script"
daemon_command = ["foo", "bar"]
script_command = ["bar", "foo"]
}
}
ports {
proxy_min_port = 20000
proxy_max_port = 20000
}
`)
defer a.Shutdown()
// Register a target service we can use
reg := &structs.NodeService{
Service: "web",
Port: 8080,
}
require.NoError(a.AddService(reg, nil, false, "", ConfigSourceLocal))
err := a.AddProxy(tt.proxy, false, false, "", ConfigSourceLocal)
if tt.wantErr {
require.Error(err)
return
}
require.NoError(err)
// Test the ID was created as we expect.
got := a.State.Proxy("web-proxy")
wantProxy := tt.wantProxy
if wantProxy == nil {
wantProxy = tt.proxy
}
wantProxy.ProxyService = got.Proxy.ProxyService
require.Equal(wantProxy, got.Proxy)
// Ensure a TCP check was created for the service.
gotCheck := a.State.Check("service:web-proxy")
if tt.wantTCPCheck == "" {
require.Nil(gotCheck)
} else {
require.NotNil(gotCheck)
require.Equal("Connect Proxy Listening", gotCheck.Name)
// Confusingly, a.State.Check("service:web-proxy") will return the state
// but its Definition field will be empty. This appears to be expected
// when adding Checks as part of `AddService`. Notice how `AddService`
// tests in this file don't assert on that state but instead look at the
// agent's check state directly to ensure the right thing was registered.
// We'll do the same for now.
gotTCP, ok := a.checkTCPs["service:web-proxy"]
require.True(ok)
require.Equal(tt.wantTCPCheck, gotTCP.TCP)
require.Equal(10*time.Second, gotTCP.Interval)
}
})
}
}
func TestAgent_RemoveProxy(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), `
node_name = "node1"
`)
defer a.Shutdown()
require := require.New(t)
// Register a target service we can use
reg := &structs.NodeService{
Service: "web",
Port: 8080,
}
require.NoError(a.AddService(reg, nil, false, "", ConfigSourceLocal))
// Add a proxy for web
pReg := &structs.ConnectManagedProxy{
TargetServiceID: "web",
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"foo"},
}
require.NoError(a.AddProxy(pReg, false, false, "", ConfigSourceLocal))
// Test the ID was created as we expect.
gotProxy := a.State.Proxy("web-proxy")
require.NotNil(gotProxy)
err := a.RemoveProxy("web-proxy", false)
require.NoError(err)
gotProxy = a.State.Proxy("web-proxy")
require.Nil(gotProxy)
require.Nil(a.State.Service("web-proxy"), "web-proxy service")
// Removing invalid proxy should be an error
err = a.RemoveProxy("foobar", false)
require.Error(err)
}
func TestAgent_ReLoadProxiesFromConfig(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(),
`node_name = "node1"
`)
defer a.Shutdown()
require := require.New(t)
// Register a target service we can use
reg := &structs.NodeService{
Service: "web",
Port: 8080,
}
require.NoError(a.AddService(reg, nil, false, "", ConfigSourceLocal))
proxies := a.State.Proxies()
require.Len(proxies, 0)
config := config.RuntimeConfig{
Services: []*structs.ServiceDefinition{
&structs.ServiceDefinition{
Name: "web",
Connect: &structs.ServiceConnect{
Native: false,
Proxy: &structs.ServiceDefinitionConnectProxy{},
},
},
},
}
require.NoError(a.loadProxies(&config))
// ensure we loaded the proxy
proxies = a.State.Proxies()
require.Len(proxies, 1)
// store the auto-generated token
ptok := ""
pid := ""
for id := range proxies {
pid = id
ptok = proxies[id].ProxyToken
break
}
// reload the proxies and ensure the proxy token is the same
require.NoError(a.unloadProxies())
proxies = a.State.Proxies()
require.Len(proxies, 0)
require.NoError(a.loadProxies(&config))
proxies = a.State.Proxies()
require.Len(proxies, 1)
require.Equal(ptok, proxies[pid].ProxyToken)
// make sure when the config goes away so does the proxy
require.NoError(a.unloadProxies())
proxies = a.State.Proxies()
require.Len(proxies, 0)
// a.config contains no services or proxies
require.NoError(a.loadProxies(a.config))
proxies = a.State.Proxies()
require.Len(proxies, 0)
}
func TestAgent_SetupProxyManager(t *testing.T) {
t.Parallel()
dataDir := testutil.TempDir(t, "agent") // we manage the data dir
defer os.RemoveAll(dataDir)
hcl := `
ports { http = -1 }
data_dir = "` + dataDir + `"
`
a, err := NewUnstartedAgent(t, t.Name(), hcl)
require.NoError(t, err)
require.Error(t, a.setupProxyManager(), "setupProxyManager should fail with invalid HTTP API config")
hcl = `
ports { http = 8001 }
data_dir = "` + dataDir + `"
`
a, err = NewUnstartedAgent(t, t.Name(), hcl)
require.NoError(t, err)
require.NoError(t, a.setupProxyManager())
}
func TestAgent_loadTokens(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), `


@@ -947,8 +947,6 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) {
assert.Len(nodes, 1)
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(args.Service.Proxy, nodes[0].ServiceProxy)
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
assert.Equal(args.Service.Proxy.DestinationServiceName, nodes[0].ServiceProxyDestination)
}
// Test that the Connect-compatible endpoints can be queried for a


@@ -629,11 +629,6 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
b.boolValWithDefault(c.ACL.TokenReplication, b.boolValWithDefault(c.EnableACLReplication, enableTokenReplication))
proxyDefaultExecMode := b.stringVal(c.Connect.ProxyDefaults.ExecMode)
proxyDefaultDaemonCommand := c.Connect.ProxyDefaults.DaemonCommand
proxyDefaultScriptCommand := c.Connect.ProxyDefaults.ScriptCommand
proxyDefaultConfig := c.Connect.ProxyDefaults.Config
enableRemoteScriptChecks := b.boolVal(c.EnableScriptChecks)
enableLocalScriptChecks := b.boolValWithDefault(c.EnableLocalScriptChecks, enableRemoteScriptChecks)
@@ -789,125 +784,117 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
},
// Agent
AdvertiseAddrLAN: advertiseAddrLAN,
AdvertiseAddrWAN: advertiseAddrWAN,
BindAddr: bindAddr,
Bootstrap: b.boolVal(c.Bootstrap),
BootstrapExpect: b.intVal(c.BootstrapExpect),
CAFile: b.stringVal(c.CAFile),
CAPath: b.stringVal(c.CAPath),
CertFile: b.stringVal(c.CertFile),
CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval),
CheckOutputMaxSize: b.intValWithDefault(c.CheckOutputMaxSize, 4096),
Checks: checks,
ClientAddrs: clientAddrs,
ConfigEntryBootstrap: configEntries,
AutoEncryptTLS: autoEncryptTLS,
AutoEncryptAllowTLS: autoEncryptAllowTLS,
ConnectEnabled: connectEnabled,
ConnectCAProvider: connectCAProvider,
ConnectCAConfig: connectCAConfig,
ConnectProxyAllowManagedRoot: b.boolVal(c.Connect.Proxy.AllowManagedRoot),
ConnectProxyAllowManagedAPIRegistration: b.boolVal(c.Connect.Proxy.AllowManagedAPIRegistration),
ConnectProxyBindMinPort: proxyMinPort,
ConnectProxyBindMaxPort: proxyMaxPort,
ConnectSidecarMinPort: sidecarMinPort,
ConnectSidecarMaxPort: sidecarMaxPort,
ConnectProxyDefaultExecMode: proxyDefaultExecMode,
ConnectProxyDefaultDaemonCommand: proxyDefaultDaemonCommand,
ConnectProxyDefaultScriptCommand: proxyDefaultScriptCommand,
ConnectProxyDefaultConfig: proxyDefaultConfig,
DataDir: b.stringVal(c.DataDir),
Datacenter: datacenter,
DevMode: b.boolVal(b.Flags.DevMode),
DisableAnonymousSignature: b.boolVal(c.DisableAnonymousSignature),
DisableCoordinates: b.boolVal(c.DisableCoordinates),
DisableHostNodeID: b.boolVal(c.DisableHostNodeID),
DisableHTTPUnprintableCharFilter: b.boolVal(c.DisableHTTPUnprintableCharFilter),
DisableKeyringFile: b.boolVal(c.DisableKeyringFile),
DisableRemoteExec: b.boolVal(c.DisableRemoteExec),
DisableUpdateCheck: b.boolVal(c.DisableUpdateCheck),
DiscardCheckOutput: b.boolVal(c.DiscardCheckOutput),
DiscoveryMaxStale: b.durationVal("discovery_max_stale", c.DiscoveryMaxStale),
EnableAgentTLSForChecks: b.boolVal(c.EnableAgentTLSForChecks),
EnableCentralServiceConfig: b.boolVal(c.EnableCentralServiceConfig),
EnableDebug: b.boolVal(c.EnableDebug),
EnableRemoteScriptChecks: enableRemoteScriptChecks,
EnableLocalScriptChecks: enableLocalScriptChecks,
EnableSyslog: b.boolVal(c.EnableSyslog),
EnableUI: b.boolVal(c.UI),
EncryptKey: b.stringVal(c.EncryptKey),
EncryptVerifyIncoming: b.boolVal(c.EncryptVerifyIncoming),
EncryptVerifyOutgoing: b.boolVal(c.EncryptVerifyOutgoing),
GRPCPort: grpcPort,
GRPCAddrs: grpcAddrs,
KeyFile: b.stringVal(c.KeyFile),
KVMaxValueSize: b.uint64Val(c.Limits.KVMaxValueSize),
LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime),
LeaveOnTerm: leaveOnTerm,
LogLevel: b.stringVal(c.LogLevel),
LogFile: b.stringVal(c.LogFile),
LogRotateBytes: b.intVal(c.LogRotateBytes),
LogRotateDuration: b.durationVal("log_rotate_duration", c.LogRotateDuration),
LogRotateMaxFiles: b.intVal(c.LogRotateMaxFiles),
NodeID: types.NodeID(b.stringVal(c.NodeID)),
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),
NonVotingServer: b.boolVal(c.NonVotingServer),
PidFile: b.stringVal(c.PidFile),
PrimaryDatacenter: primaryDatacenter,
RPCAdvertiseAddr: rpcAdvertiseAddr,
RPCBindAddr: rpcBindAddr,
RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout),
RPCMaxBurst: b.intVal(c.Limits.RPCMaxBurst),
RPCProtocol: b.intVal(c.RPCProtocol),
RPCRateLimit: rate.Limit(b.float64Val(c.Limits.RPCRate)),
RaftProtocol: b.intVal(c.RaftProtocol),
RaftSnapshotThreshold: b.intVal(c.RaftSnapshotThreshold),
RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval),
RaftTrailingLogs: b.intVal(c.RaftTrailingLogs),
ReconnectTimeoutLAN: b.durationVal("reconnect_timeout", c.ReconnectTimeoutLAN),
ReconnectTimeoutWAN: b.durationVal("reconnect_timeout_wan", c.ReconnectTimeoutWAN),
RejoinAfterLeave: b.boolVal(c.RejoinAfterLeave),
RetryJoinIntervalLAN: b.durationVal("retry_interval", c.RetryJoinIntervalLAN),
RetryJoinIntervalWAN: b.durationVal("retry_interval_wan", c.RetryJoinIntervalWAN),
RetryJoinLAN: b.expandAllOptionalAddrs("retry_join", c.RetryJoinLAN),
RetryJoinMaxAttemptsLAN: b.intVal(c.RetryJoinMaxAttemptsLAN),
RetryJoinMaxAttemptsWAN: b.intVal(c.RetryJoinMaxAttemptsWAN),
RetryJoinWAN: b.expandAllOptionalAddrs("retry_join_wan", c.RetryJoinWAN),
SegmentName: b.stringVal(c.SegmentName),
Segments: segments,
SerfAdvertiseAddrLAN: serfAdvertiseAddrLAN,
SerfAdvertiseAddrWAN: serfAdvertiseAddrWAN,
SerfBindAddrLAN: serfBindAddrLAN,
SerfBindAddrWAN: serfBindAddrWAN,
SerfPortLAN: serfPortLAN,
SerfPortWAN: serfPortWAN,
ServerMode: b.boolVal(c.ServerMode),
ServerName: b.stringVal(c.ServerName),
ServerPort: serverPort,
Services: services,
SessionTTLMin: b.durationVal("session_ttl_min", c.SessionTTLMin),
SkipLeaveOnInt: skipLeaveOnInt,
StartJoinAddrsLAN: b.expandAllOptionalAddrs("start_join", c.StartJoinAddrsLAN),
StartJoinAddrsWAN: b.expandAllOptionalAddrs("start_join_wan", c.StartJoinAddrsWAN),
SyslogFacility: b.stringVal(c.SyslogFacility),
TLSCipherSuites: b.tlsCipherSuites("tls_cipher_suites", c.TLSCipherSuites),
TLSMinVersion: b.stringVal(c.TLSMinVersion),
TLSPreferServerCipherSuites: b.boolVal(c.TLSPreferServerCipherSuites),
TaggedAddresses: c.TaggedAddresses,
TranslateWANAddrs: b.boolVal(c.TranslateWANAddrs),
UIDir: b.stringVal(c.UIDir),
UIContentPath: UIPathBuilder(b.stringVal(b.Flags.Config.UIContentPath)),
UnixSocketGroup: b.stringVal(c.UnixSocket.Group),
UnixSocketMode: b.stringVal(c.UnixSocket.Mode),
UnixSocketUser: b.stringVal(c.UnixSocket.User),
VerifyIncoming: b.boolVal(c.VerifyIncoming),
VerifyIncomingHTTPS: b.boolVal(c.VerifyIncomingHTTPS),
VerifyIncomingRPC: b.boolVal(c.VerifyIncomingRPC),
VerifyOutgoing: verifyOutgoing,
VerifyServerHostname: verifyServerName,
Watches: c.Watches,
AdvertiseAddrLAN: advertiseAddrLAN,
AdvertiseAddrWAN: advertiseAddrWAN,
BindAddr: bindAddr,
Bootstrap: b.boolVal(c.Bootstrap),
BootstrapExpect: b.intVal(c.BootstrapExpect),
CAFile: b.stringVal(c.CAFile),
CAPath: b.stringVal(c.CAPath),
CertFile: b.stringVal(c.CertFile),
CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval),
CheckOutputMaxSize: b.intValWithDefault(c.CheckOutputMaxSize, 4096),
Checks: checks,
ClientAddrs: clientAddrs,
ConfigEntryBootstrap: configEntries,
AutoEncryptTLS: autoEncryptTLS,
AutoEncryptAllowTLS: autoEncryptAllowTLS,
ConnectEnabled: connectEnabled,
ConnectCAProvider: connectCAProvider,
ConnectCAConfig: connectCAConfig,
ConnectSidecarMinPort: sidecarMinPort,
ConnectSidecarMaxPort: sidecarMaxPort,
DataDir: b.stringVal(c.DataDir),
Datacenter: datacenter,
DevMode: b.boolVal(b.Flags.DevMode),
DisableAnonymousSignature: b.boolVal(c.DisableAnonymousSignature),
DisableCoordinates: b.boolVal(c.DisableCoordinates),
DisableHostNodeID: b.boolVal(c.DisableHostNodeID),
DisableHTTPUnprintableCharFilter: b.boolVal(c.DisableHTTPUnprintableCharFilter),
DisableKeyringFile: b.boolVal(c.DisableKeyringFile),
DisableRemoteExec: b.boolVal(c.DisableRemoteExec),
DisableUpdateCheck: b.boolVal(c.DisableUpdateCheck),
DiscardCheckOutput: b.boolVal(c.DiscardCheckOutput),
DiscoveryMaxStale: b.durationVal("discovery_max_stale", c.DiscoveryMaxStale),
EnableAgentTLSForChecks: b.boolVal(c.EnableAgentTLSForChecks),
EnableCentralServiceConfig: b.boolVal(c.EnableCentralServiceConfig),
EnableDebug: b.boolVal(c.EnableDebug),
EnableRemoteScriptChecks: enableRemoteScriptChecks,
EnableLocalScriptChecks: enableLocalScriptChecks,
EnableSyslog: b.boolVal(c.EnableSyslog),
EnableUI: b.boolVal(c.UI),
EncryptKey: b.stringVal(c.EncryptKey),
EncryptVerifyIncoming: b.boolVal(c.EncryptVerifyIncoming),
EncryptVerifyOutgoing: b.boolVal(c.EncryptVerifyOutgoing),
GRPCPort: grpcPort,
GRPCAddrs: grpcAddrs,
KeyFile: b.stringVal(c.KeyFile),
KVMaxValueSize: b.uint64Val(c.Limits.KVMaxValueSize),
LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime),
LeaveOnTerm: leaveOnTerm,
LogLevel: b.stringVal(c.LogLevel),
LogFile: b.stringVal(c.LogFile),
LogRotateBytes: b.intVal(c.LogRotateBytes),
LogRotateDuration: b.durationVal("log_rotate_duration", c.LogRotateDuration),
LogRotateMaxFiles: b.intVal(c.LogRotateMaxFiles),
NodeID: types.NodeID(b.stringVal(c.NodeID)),
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),
NonVotingServer: b.boolVal(c.NonVotingServer),
PidFile: b.stringVal(c.PidFile),
PrimaryDatacenter: primaryDatacenter,
RPCAdvertiseAddr: rpcAdvertiseAddr,
RPCBindAddr: rpcBindAddr,
RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout),
RPCMaxBurst: b.intVal(c.Limits.RPCMaxBurst),
RPCProtocol: b.intVal(c.RPCProtocol),
RPCRateLimit: rate.Limit(b.float64Val(c.Limits.RPCRate)),
RaftProtocol: b.intVal(c.RaftProtocol),
RaftSnapshotThreshold: b.intVal(c.RaftSnapshotThreshold),
RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval),
RaftTrailingLogs: b.intVal(c.RaftTrailingLogs),
ReconnectTimeoutLAN: b.durationVal("reconnect_timeout", c.ReconnectTimeoutLAN),
ReconnectTimeoutWAN: b.durationVal("reconnect_timeout_wan", c.ReconnectTimeoutWAN),
RejoinAfterLeave: b.boolVal(c.RejoinAfterLeave),
RetryJoinIntervalLAN: b.durationVal("retry_interval", c.RetryJoinIntervalLAN),
RetryJoinIntervalWAN: b.durationVal("retry_interval_wan", c.RetryJoinIntervalWAN),
RetryJoinLAN: b.expandAllOptionalAddrs("retry_join", c.RetryJoinLAN),
RetryJoinMaxAttemptsLAN: b.intVal(c.RetryJoinMaxAttemptsLAN),
RetryJoinMaxAttemptsWAN: b.intVal(c.RetryJoinMaxAttemptsWAN),
RetryJoinWAN: b.expandAllOptionalAddrs("retry_join_wan", c.RetryJoinWAN),
SegmentName: b.stringVal(c.SegmentName),
Segments: segments,
SerfAdvertiseAddrLAN: serfAdvertiseAddrLAN,
SerfAdvertiseAddrWAN: serfAdvertiseAddrWAN,
SerfBindAddrLAN: serfBindAddrLAN,
SerfBindAddrWAN: serfBindAddrWAN,
SerfPortLAN: serfPortLAN,
SerfPortWAN: serfPortWAN,
ServerMode: b.boolVal(c.ServerMode),
ServerName: b.stringVal(c.ServerName),
ServerPort: serverPort,
Services: services,
SessionTTLMin: b.durationVal("session_ttl_min", c.SessionTTLMin),
SkipLeaveOnInt: skipLeaveOnInt,
StartJoinAddrsLAN: b.expandAllOptionalAddrs("start_join", c.StartJoinAddrsLAN),
StartJoinAddrsWAN: b.expandAllOptionalAddrs("start_join_wan", c.StartJoinAddrsWAN),
SyslogFacility: b.stringVal(c.SyslogFacility),
TLSCipherSuites: b.tlsCipherSuites("tls_cipher_suites", c.TLSCipherSuites),
TLSMinVersion: b.stringVal(c.TLSMinVersion),
TLSPreferServerCipherSuites: b.boolVal(c.TLSPreferServerCipherSuites),
TaggedAddresses: c.TaggedAddresses,
TranslateWANAddrs: b.boolVal(c.TranslateWANAddrs),
UIDir: b.stringVal(c.UIDir),
UIContentPath: UIPathBuilder(b.stringVal(b.Flags.Config.UIContentPath)),
UnixSocketGroup: b.stringVal(c.UnixSocket.Group),
UnixSocketMode: b.stringVal(c.UnixSocket.Mode),
UnixSocketUser: b.stringVal(c.UnixSocket.User),
VerifyIncoming: b.boolVal(c.VerifyIncoming),
VerifyIncomingHTTPS: b.boolVal(c.VerifyIncomingHTTPS),
VerifyIncomingRPC: b.boolVal(c.VerifyIncomingRPC),
VerifyOutgoing: verifyOutgoing,
VerifyServerHostname: verifyServerName,
Watches: c.Watches,
}
if rt.BootstrapExpect == 1 {
@ -1286,10 +1273,8 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition {
EnableTagOverride: b.boolVal(v.EnableTagOverride),
Weights: serviceWeights,
Checks: checks,
// DEPRECATED (ProxyDestination) - don't populate deprecated field, just use
// it as a default below on read. Remove that when removing ProxyDestination
Proxy: b.serviceProxyVal(v.Proxy, v.ProxyDestination),
Connect: b.serviceConnectVal(v.Connect),
Proxy: b.serviceProxyVal(v.Proxy),
Connect: b.serviceConnectVal(v.Connect),
}
}
@ -1307,13 +1292,8 @@ func (b *Builder) serviceKindVal(v *string) structs.ServiceKind {
}
}
func (b *Builder) serviceProxyVal(v *ServiceProxy, deprecatedDest *string) *structs.ConnectProxyConfig {
func (b *Builder) serviceProxyVal(v *ServiceProxy) *structs.ConnectProxyConfig {
if v == nil {
if deprecatedDest != nil {
return &structs.ConnectProxyConfig{
DestinationServiceName: b.stringVal(deprecatedDest),
}
}
return nil
}
@ -1370,16 +1350,6 @@ func (b *Builder) serviceConnectVal(v *ServiceConnect) *structs.ServiceConnect {
return nil
}
var proxy *structs.ServiceDefinitionConnectProxy
if v.Proxy != nil {
proxy = &structs.ServiceDefinitionConnectProxy{
ExecMode: b.stringVal(v.Proxy.ExecMode),
Command: v.Proxy.Command,
Config: v.Proxy.Config,
Upstreams: b.upstreamsVal(v.Proxy.Upstreams),
}
}
sidecar := b.serviceVal(v.SidecarService)
if sidecar != nil {
// Sanity checks
@ -1392,16 +1362,11 @@ func (b *Builder) serviceConnectVal(v *ServiceConnect) *structs.ServiceConnect {
b.err = multierror.Append(b.err, fmt.Errorf("sidecar_service can't have a nested sidecar_service"))
sidecar.Connect.SidecarService = nil
}
if sidecar.Connect.Proxy != nil {
b.err = multierror.Append(b.err, fmt.Errorf("sidecar_service can't have a managed proxy"))
sidecar.Connect.Proxy = nil
}
}
}
return &structs.ServiceConnect{
Native: b.boolVal(v.Native),
Proxy: proxy,
SidecarService: sidecar,
}
}
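For reference, the registration shape the builder still supports in place of a managed proxy is connect.sidecar_service. A minimal sketch; the service name, port, and upstream values are purely illustrative:
// A service definition that gets an agent-registered sidecar proxy.
// This replaces the removed connect.proxy (managed proxy) block.
var def = &structs.ServiceDefinition{
	Name: "web",
	Port: 8080,
	Connect: &structs.ServiceConnect{
		SidecarService: &structs.ServiceDefinition{
			Proxy: &structs.ConnectProxyConfig{
				Upstreams: structs.Upstreams{
					{DestinationName: "db", LocalBindPort: 1234},
				},
			},
		},
	},
}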

View File

@ -92,8 +92,7 @@ func Parse(data string, format string) (c Config, err error) {
"service.proxy.upstreams",
"services.proxy.upstreams",
// Need all the service(s) exceptions also for nested sidecar service except
// managed proxy which is explicitly not supported there.
// Need all the service(s) exceptions also for nested sidecar service.
"service.connect.sidecar_service.checks",
"services.connect.sidecar_service.checks",
"service.connect.sidecar_service.proxy.upstreams",
@ -387,10 +386,8 @@ type ServiceDefinition struct {
Token *string `json:"token,omitempty" hcl:"token" mapstructure:"token"`
Weights *ServiceWeights `json:"weights,omitempty" hcl:"weights" mapstructure:"weights"`
EnableTagOverride *bool `json:"enable_tag_override,omitempty" hcl:"enable_tag_override" mapstructure:"enable_tag_override"`
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
ProxyDestination *string `json:"proxy_destination,omitempty" hcl:"proxy_destination" mapstructure:"proxy_destination"`
Proxy *ServiceProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
Connect *ServiceConnect `json:"connect,omitempty" hcl:"connect" mapstructure:"connect"`
Proxy *ServiceProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
Connect *ServiceConnect `json:"connect,omitempty" hcl:"connect" mapstructure:"connect"`
}
type CheckDefinition struct {
@ -424,9 +421,6 @@ type ServiceConnect struct {
// Native is true when this service can natively understand Connect.
Native *bool `json:"native,omitempty" hcl:"native" mapstructure:"native"`
// Proxy configures a connect proxy instance for the service
Proxy *ServiceConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
// SidecarService is a nested Service Definition to register at the same time.
// It's purely a convenience mechanism to allow specifying a sidecar service
// along with the application service definition. Its nested nature allows
@ -437,13 +431,6 @@ type ServiceConnect struct {
SidecarService *ServiceDefinition `json:"sidecar_service,omitempty" hcl:"sidecar_service" mapstructure:"sidecar_service"`
}
type ServiceConnectProxy struct {
Command []string `json:"command,omitempty" hcl:"command" mapstructure:"command"`
ExecMode *string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"`
Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"`
Upstreams []Upstream `json:"upstreams,omitempty" hcl:"upstreams" mapstructure:"upstreams"`
}
// ServiceProxy is the additional config needed for a Kind = connect-proxy
// registration.
type ServiceProxy struct {
@ -540,39 +527,9 @@ type AutoEncrypt struct {
type Connect struct {
// Enabled opts the agent into connect. It should be set on all clients and
// servers in a cluster for correct connect operation.
Enabled *bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"`
Proxy ConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
ProxyDefaults ConnectProxyDefaults `json:"proxy_defaults,omitempty" hcl:"proxy_defaults" mapstructure:"proxy_defaults"`
CAProvider *string `json:"ca_provider,omitempty" hcl:"ca_provider" mapstructure:"ca_provider"`
CAConfig map[string]interface{} `json:"ca_config,omitempty" hcl:"ca_config" mapstructure:"ca_config"`
}
// ConnectProxy is the agent-global connect proxy configuration.
type ConnectProxy struct {
// Consul will not execute managed proxies if its EUID is 0 (root).
// If this is true, then Consul will execute proxies if Consul is
// running as root. This is not recommended.
AllowManagedRoot *bool `json:"allow_managed_root" hcl:"allow_managed_root" mapstructure:"allow_managed_root"`
// AllowManagedAPIRegistration enables managed proxy registration
// via the agent HTTP API. If this is false, only file configurations
// can be used.
AllowManagedAPIRegistration *bool `json:"allow_managed_api_registration" hcl:"allow_managed_api_registration" mapstructure:"allow_managed_api_registration"`
}
// ConnectProxyDefaults is the agent-global defaults for managed Connect proxies.
type ConnectProxyDefaults struct {
// ExecMode is used where a registration doesn't include an exec_mode.
// Defaults to daemon.
ExecMode *string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"`
// DaemonCommand is used to start proxy in exec_mode = daemon if not specified
// at registration time.
DaemonCommand []string `json:"daemon_command,omitempty" hcl:"daemon_command" mapstructure:"daemon_command"`
// ScriptCommand is used to start proxy in exec_mode = script if not specified
// at registration time.
ScriptCommand []string `json:"script_command,omitempty" hcl:"script_command" mapstructure:"script_command"`
// Config is merged into an Config specified at registration time.
Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"`
Enabled *bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"`
CAProvider *string `json:"ca_provider,omitempty" hcl:"ca_provider" mapstructure:"ca_provider"`
CAConfig map[string]interface{} `json:"ca_config,omitempty" hcl:"ca_config" mapstructure:"ca_config"`
}
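After this change the agent-level connect stanza reduces to the three fields above. A minimal HCL sketch, embedded Go-test style; the values are illustrative:
// Trimmed connect block: the managed-proxy sub-stanzas no longer parse.
const connectHCL = `
connect {
  enabled     = true
  ca_provider = "consul"
}
`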
// SOA is the configuration of SOA for DNS

View File

@ -531,16 +531,6 @@ type RuntimeConfig struct {
// and servers in a cluster for correct connect operation.
ConnectEnabled bool
// ConnectProxyBindMinPort is the inclusive start of the range of ports
// allocated to the agent for starting proxy listeners on where no explicit
// port is specified.
ConnectProxyBindMinPort int
// ConnectProxyBindMaxPort is the inclusive end of the range of ports
// allocated to the agent for starting proxy listeners on where no explicit
// port is specified.
ConnectProxyBindMaxPort int
// ConnectSidecarMinPort is the inclusive start of the range of ports
// allocated to the agent for assigning to sidecar services where no port is
// specified.
@ -551,47 +541,12 @@ type RuntimeConfig struct {
// specified
ConnectSidecarMaxPort int
// ConnectProxyAllowManagedRoot is true if Consul can execute managed
// proxies when running as root (EUID == 0).
ConnectProxyAllowManagedRoot bool
// ConnectProxyAllowManagedAPIRegistration enables managed proxy registration
// via the agent HTTP API. If this is false, only file configurations
// can be used.
ConnectProxyAllowManagedAPIRegistration bool
// ConnectProxyDefaultExecMode is used where a registration doesn't include an
// exec_mode. Defaults to daemon.
ConnectProxyDefaultExecMode string
// ConnectProxyDefaultDaemonCommand is used to start proxy in exec_mode =
// daemon if not specified at registration time.
ConnectProxyDefaultDaemonCommand []string
// ConnectProxyDefaultScriptCommand is used to start proxy in exec_mode =
// script if not specified at registration time.
ConnectProxyDefaultScriptCommand []string
// ConnectProxyDefaultConfig is merged with any config specified at
// registration time to allow global control of defaults.
ConnectProxyDefaultConfig map[string]interface{}
// ConnectCAProvider is the type of CA provider to use with Connect.
ConnectCAProvider string
// ConnectCAConfig is the config to use for the CA provider.
ConnectCAConfig map[string]interface{}
// ConnectTestDisableManagedProxies is not exposed to public config but is
// used by TestAgent to prevent self-executing the test binary in the
// background if a managed proxy is created for a test. The only place we
// actually want to test processes really being spun up and managed is in
// `agent/proxy` and it does it at a lower level. Note that this still allows
// registering managed proxies via API and other methods, and still creates
// all the agent state for them, just doesn't actually start external
// processes up.
ConnectTestDisableManagedProxies bool
// ConnectTestCALeafRootChangeSpread is used to control how long the CA leaf
// cache will spread CSRs over when a root change occurs. For now we don't
// expose this in public config intentionally but could later with a rename.
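The managed-proxy bind range (proxy_min_port/proxy_max_port) is gone from the ports block, while the sidecar range above survives. A minimal HCL sketch; the values mirror the TestFullConfig fixture further down:
// Only the sidecar port range remains configurable.
const portsHCL = `
ports {
  sidecar_min_port = 8888
  sidecar_max_port = 9999
}
`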

View File

@ -2022,40 +2022,6 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
`},
err: "sidecar_service can't have a nested sidecar_service",
},
{
desc: "sidecar_service can't have managed proxy",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`{
"service": {
"name": "web",
"port": 1234,
"connect": {
"sidecar_service": {
"connect": {
"proxy": {}
}
}
}
}
}`},
hcl: []string{`
service {
name = "web"
port = 1234
connect {
sidecar_service {
connect {
proxy {
}
}
}
}
}
`},
err: "sidecar_service can't have a managed proxy",
},
{
desc: "telemetry.prefix_filter cannot be empty",
args: []string{
@ -2351,176 +2317,6 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
},
},
{
desc: "Service managed proxy 'upstreams'",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{
`{
"service": {
"name": "web",
"port": 8080,
"connect": {
"proxy": {
"upstreams": [{
"destination_name": "db",
"local_bind_port": 1234
}]
}
}
}
}`,
},
hcl: []string{
`service {
name = "web"
port = 8080
connect {
proxy {
upstreams {
destination_name = "db"
local_bind_port = 1234
}
}
}
}`,
},
patch: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.Services = []*structs.ServiceDefinition{
&structs.ServiceDefinition{
Name: "web",
Port: 8080,
Connect: &structs.ServiceConnect{
Proxy: &structs.ServiceDefinitionConnectProxy{
Upstreams: structs.Upstreams{
{
DestinationName: "db",
DestinationType: structs.UpstreamDestTypeService,
LocalBindPort: 1234,
},
},
},
},
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
},
}
},
},
{
desc: "Multiple service managed proxy 'upstreams'",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{
`{
"service": {
"name": "web",
"port": 8080,
"connect": {
"proxy": {
"upstreams": [{
"destination_name": "db",
"local_bind_port": 1234
}, {
"destination_name": "cache",
"local_bind_port": 2345
}]
}
}
}
}`,
},
hcl: []string{
`service {
name = "web"
port = 8080
connect {
proxy {
upstreams = [
{
destination_name = "db"
local_bind_port = 1234
},
{
destination_name = "cache"
local_bind_port = 2345
}
]
}
}
}`,
},
patch: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.Services = []*structs.ServiceDefinition{
&structs.ServiceDefinition{
Name: "web",
Port: 8080,
Connect: &structs.ServiceConnect{
Proxy: &structs.ServiceDefinitionConnectProxy{
Upstreams: structs.Upstreams{
{
DestinationName: "db",
DestinationType: structs.UpstreamDestTypeService,
LocalBindPort: 1234,
},
{
DestinationName: "cache",
DestinationType: structs.UpstreamDestTypeService,
LocalBindPort: 2345,
},
},
},
},
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
},
}
},
},
{
desc: "enabling Connect allow_managed_root",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{
`{ "connect": { "proxy": { "allow_managed_root": true } } }`,
},
hcl: []string{
`connect { proxy { allow_managed_root = true } }`,
},
patch: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.ConnectProxyAllowManagedRoot = true
},
},
{
desc: "enabling Connect allow_managed_api_registration",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{
`{ "connect": { "proxy": { "allow_managed_api_registration": true } } }`,
},
hcl: []string{
`connect { proxy { allow_managed_api_registration = true } }`,
},
patch: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.ConnectProxyAllowManagedAPIRegistration = true
},
},
{
// This tests that we correctly added the nested paths to arrays of objects
// to the exceptions in PatchSliceOfMaps in config.go (for single service)
@ -3659,17 +3455,7 @@ func TestFullConfig(t *testing.T) {
"csr_max_per_second": 100,
"csr_max_concurrent": 2
},
"enabled": true,
"proxy_defaults": {
"exec_mode": "script",
"daemon_command": ["consul", "connect", "proxy"],
"script_command": ["proxyctl.sh"],
"config": {
"foo": "bar",
"connect_timeout_ms": 1000,
"pedantic_mode": true
}
}
"enabled": true
},
"gossip_lan" : {
"gossip_nodes": 6,
@ -3761,8 +3547,6 @@ func TestFullConfig(t *testing.T) {
"https": 15127,
"server": 3757,
"grpc": 4881,
"proxy_min_port": 2000,
"proxy_max_port": 3000,
"sidecar_min_port": 8888,
"sidecar_max_port": 9999
},
@ -3993,15 +3777,7 @@ func TestFullConfig(t *testing.T) {
"deregister_critical_service_after": "68482s"
}
],
"connect": {
"proxy": {
"exec_mode": "daemon",
"command": ["awesome-proxy"],
"config": {
"foo": "qux"
}
}
}
"connect": {}
},
{
"id": "Kh81CPF6",
@ -4264,18 +4040,6 @@ func TestFullConfig(t *testing.T) {
csr_max_concurrent = 2.0
}
enabled = true
proxy_defaults {
exec_mode = "script"
daemon_command = ["consul", "connect", "proxy"]
script_command = ["proxyctl.sh"]
config = {
foo = "bar"
# hack float since json parses numbers as float and we have to
# assert against the same thing
connect_timeout_ms = 1000.0
pedantic_mode = true
}
}
}
gossip_lan {
gossip_nodes = 6
@ -4598,15 +4362,7 @@ func TestFullConfig(t *testing.T) {
deregister_critical_service_after = "68482s"
}
]
connect {
proxy {
exec_mode = "daemon"
command = ["awesome-proxy"]
config = {
foo = "qux"
}
}
}
connect {}
},
{
id = "Kh81CPF6"
@ -4957,30 +4713,18 @@ func TestFullConfig(t *testing.T) {
},
},
},
AutoEncryptTLS: true,
AutoEncryptAllowTLS: true,
ConnectEnabled: true,
ConnectProxyBindMinPort: 2000,
ConnectProxyBindMaxPort: 3000,
ConnectSidecarMinPort: 8888,
ConnectSidecarMaxPort: 9999,
ConnectCAProvider: "consul",
AutoEncryptTLS: true,
AutoEncryptAllowTLS: true,
ConnectEnabled: true,
ConnectSidecarMinPort: 8888,
ConnectSidecarMaxPort: 9999,
ConnectCAProvider: "consul",
ConnectCAConfig: map[string]interface{}{
"RotationPeriod": "90h",
"LeafCertTTL": "1h",
"CSRMaxPerSecond": float64(100),
"CSRMaxConcurrent": float64(2),
},
ConnectProxyAllowManagedRoot: false,
ConnectProxyAllowManagedAPIRegistration: false,
ConnectProxyDefaultExecMode: "script",
ConnectProxyDefaultDaemonCommand: []string{"consul", "connect", "proxy"},
ConnectProxyDefaultScriptCommand: []string{"proxyctl.sh"},
ConnectProxyDefaultConfig: map[string]interface{}{
"foo": "bar",
"connect_timeout_ms": float64(1000),
"pedantic_mode": true,
},
DNSAddrs: []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")},
DNSARecordLimit: 29907,
DNSAllowStale: true,
@ -5189,15 +4933,7 @@ func TestFullConfig(t *testing.T) {
DeregisterCriticalServiceAfter: 68482 * time.Second,
},
},
Connect: &structs.ServiceConnect{
Proxy: &structs.ServiceDefinitionConnectProxy{
ExecMode: "daemon",
Command: []string{"awesome-proxy"},
Config: map[string]interface{}{
"foo": "qux",
},
},
},
Connect: &structs.ServiceConnect{},
},
{
ID: "Kh81CPF6",
@ -5796,18 +5532,9 @@ func TestSanitize(t *testing.T) {
"ConnectCAConfig": {},
"ConnectCAProvider": "",
"ConnectEnabled": false,
"ConnectProxyAllowManagedAPIRegistration": false,
"ConnectProxyAllowManagedRoot": false,
"ConnectProxyBindMaxPort": 0,
"ConnectProxyBindMinPort": 0,
"ConnectProxyDefaultConfig": {},
"ConnectProxyDefaultDaemonCommand": [],
"ConnectProxyDefaultExecMode": "",
"ConnectProxyDefaultScriptCommand": [],
"ConnectSidecarMaxPort": 0,
"ConnectSidecarMinPort": 0,
"ConnectTestCALeafRootChangeSpread": "0s",
"ConnectTestDisableManagedProxies": false,
"ConsulCoordinateUpdateBatchSize": 0,
"ConsulCoordinateUpdateMaxBatches": 0,
"ConsulCoordinateUpdatePeriod": "15s",
@ -5974,7 +5701,6 @@ func TestSanitize(t *testing.T) {
"Name": "foo",
"Port": 0,
"Proxy": null,
"ProxyDestination": "",
"TaggedAddresses": {},
"Tags": [],
"Token": "hidden",

View File

@ -363,39 +363,6 @@ func TestCatalog_Register_ConnectProxy(t *testing.T) {
assert.Equal(args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
}
// DEPRECATED (ProxyDestination) - remove this whole test case when removing
// ProxyDestination
func TestCatalog_Register_DeprecatedConnectProxy(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
args := structs.TestRegisterRequestProxy(t)
args.Service.ProxyDestination = "legacy"
args.Service.Proxy = structs.ConnectProxyConfig{}
// Register
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// List
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: args.Service.Service,
}
var resp structs.IndexedServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(args.Service.ProxyDestination, v.ServiceProxy.DestinationServiceName)
}
// Test an invalid ConnectProxy. We don't need to exhaustively test because
// this is all tested in structs on the Validate method.
func TestCatalog_Register_ConnectProxy_invalid(t *testing.T) {
@ -419,7 +386,7 @@ func TestCatalog_Register_ConnectProxy_invalid(t *testing.T) {
}
// Test that write is required for the proxy destination to register a proxy.
func TestCatalog_Register_ConnectProxy_ACLProxyDestination(t *testing.T) {
func TestCatalog_Register_ConnectProxy_ACLDestinationServiceName(t *testing.T) {
t.Parallel()
assert := assert.New(t)

View File

@ -54,14 +54,6 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
// Add a service instance with Connect config.
connectConf := structs.ServiceConnect{
Native: true,
Proxy: &structs.ServiceDefinitionConnectProxy{
Command: []string{"foo", "bar"},
ExecMode: "a",
Config: map[string]interface{}{
"a": "qwer",
"b": 4.3,
},
},
}
fsm.state.EnsureService(3, "foo", &structs.NodeService{
ID: "web",

View File

@ -864,7 +864,7 @@ func (s *HTTPServer) parseDC(req *http.Request, dc *string) {
// If the token is invalid or not specified it will populate
// the token with the agent's UserToken (acl_token in the consul configuration).
// Parsing has the following priority: ?token, X-Consul-Token and last "Authorization: Bearer "
func (s *HTTPServer) parseTokenInternal(req *http.Request, token *string, resolveProxyToken bool) {
func (s *HTTPServer) parseTokenInternal(req *http.Request, token *string) {
tok := ""
if other := req.URL.Query().Get("token"); other != "" {
tok = other
@ -892,13 +892,6 @@ func (s *HTTPServer) parseTokenInternal(req *http.Request, token *string, resolv
}
if tok != "" {
if resolveProxyToken {
if p := s.agent.resolveProxyToken(tok); p != nil {
*token = s.agent.State.ServiceToken(p.Proxy.TargetServiceID)
return
}
}
*token = tok
return
}
@ -907,15 +900,9 @@ func (s *HTTPServer) parseTokenInternal(req *http.Request, token *string, resolv
}
// parseToken is used to parse the ?token query param or the X-Consul-Token header or
// Authorization Bearer token header (RFC6750) and resolve proxy tokens to real ACL tokens
// Authorization Bearer token header (RFC6750)
func (s *HTTPServer) parseToken(req *http.Request, token *string) {
s.parseTokenInternal(req, token, true)
}
// parseTokenWithoutResolvingProxyToken is used to parse the ?token query param or the X-Consul-Token header
// or Authorization Bearer header token (RFC6750) and
func (s *HTTPServer) parseTokenWithoutResolvingProxyToken(req *http.Request, token *string) {
s.parseTokenInternal(req, token, false)
s.parseTokenInternal(req, token)
}
func sourceAddrFromRequest(req *http.Request) string {
@ -972,9 +959,9 @@ func (s *HTTPServer) parseMetaFilter(req *http.Request) map[string]string {
// parseInternal is a convenience method for endpoints that need
// to use both parseWait and parseDC.
func (s *HTTPServer) parseInternal(resp http.ResponseWriter, req *http.Request, dc *string, b *structs.QueryOptions, resolveProxyToken bool) bool {
func (s *HTTPServer) parseInternal(resp http.ResponseWriter, req *http.Request, dc *string, b *structs.QueryOptions) bool {
s.parseDC(req, dc)
s.parseTokenInternal(req, &b.Token, resolveProxyToken)
s.parseTokenInternal(req, &b.Token)
s.parseFilter(req, &b.Filter)
if s.parseConsistency(resp, req, b) {
return true
@ -988,12 +975,7 @@ func (s *HTTPServer) parseInternal(resp http.ResponseWriter, req *http.Request,
// parse is a convenience method for endpoints that need
// to use both parseWait and parseDC.
func (s *HTTPServer) parse(resp http.ResponseWriter, req *http.Request, dc *string, b *structs.QueryOptions) bool {
return s.parseInternal(resp, req, dc, b, true)
}
// parseWithoutResolvingProxyToken is a convenience method similar to parse except that it disables resolving proxy tokens
func (s *HTTPServer) parseWithoutResolvingProxyToken(resp http.ResponseWriter, req *http.Request, dc *string, b *structs.QueryOptions) bool {
return s.parseInternal(resp, req, dc, b, false)
return s.parseInternal(resp, req, dc, b)
}
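Every endpoint now resolves the request token through this single path, with no proxy-token indirection. A sketch of a hypothetical handler using it:
// parseToken checks ?token, then X-Consul-Token, then "Authorization: Bearer",
// falling back to the agent's UserToken; the result is always a real ACL token.
func (s *HTTPServer) exampleEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var token string
	s.parseToken(req, &token)
	// use token for the RPC's QueryOptions/WriteRequest as usual
	return nil, nil
}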
func (s *HTTPServer) checkWriteAccess(req *http.Request) error {

View File

@ -57,7 +57,6 @@ func init() {
registerEndpoint("/v1/agent/connect/authorize", []string{"POST"}, (*HTTPServer).AgentConnectAuthorize)
registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPServer).AgentConnectCARoots)
registerEndpoint("/v1/agent/connect/ca/leaf/", []string{"GET"}, (*HTTPServer).AgentConnectCALeafCert)
registerEndpoint("/v1/agent/connect/proxy/", []string{"GET"}, (*HTTPServer).AgentConnectProxyConfig)
registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPServer).AgentRegisterService)
registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterService)
registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPServer).AgentServiceMaintenance)

View File

@ -1128,97 +1128,6 @@ func TestEnableWebUI(t *testing.T) {
}
}
func TestParseToken_ProxyTokenResolve(t *testing.T) {
t.Parallel()
type endpointCheck struct {
endpoint string
handler func(s *HTTPServer, resp http.ResponseWriter, req *http.Request) (interface{}, error)
}
// This is not an exhaustive list of all of our endpoints and is only testing GET endpoints
// right now. However it provides decent coverage that the proxy token resolution
// is happening properly
tests := []endpointCheck{
{"/v1/acl/info/root", (*HTTPServer).ACLGet},
{"/v1/agent/self", (*HTTPServer).AgentSelf},
{"/v1/agent/metrics", (*HTTPServer).AgentMetrics},
{"/v1/agent/services", (*HTTPServer).AgentServices},
{"/v1/agent/checks", (*HTTPServer).AgentChecks},
{"/v1/agent/members", (*HTTPServer).AgentMembers},
{"/v1/agent/connect/ca/roots", (*HTTPServer).AgentConnectCARoots},
{"/v1/agent/connect/ca/leaf/test", (*HTTPServer).AgentConnectCALeafCert},
{"/v1/agent/connect/ca/proxy/test", (*HTTPServer).AgentConnectProxyConfig},
{"/v1/catalog/connect", (*HTTPServer).CatalogConnectServiceNodes},
{"/v1/catalog/datacenters", (*HTTPServer).CatalogDatacenters},
{"/v1/catalog/nodes", (*HTTPServer).CatalogNodes},
{"/v1/catalog/node/" + t.Name(), (*HTTPServer).CatalogNodeServices},
{"/v1/catalog/services", (*HTTPServer).CatalogServices},
{"/v1/catalog/service/test", (*HTTPServer).CatalogServiceNodes},
{"/v1/connect/ca/configuration", (*HTTPServer).ConnectCAConfiguration},
{"/v1/connect/ca/roots", (*HTTPServer).ConnectCARoots},
{"/v1/connect/intentions", (*HTTPServer).IntentionEndpoint},
{"/v1/coordinate/datacenters", (*HTTPServer).CoordinateDatacenters},
{"/v1/coordinate/nodes", (*HTTPServer).CoordinateNodes},
{"/v1/coordinate/node/" + t.Name(), (*HTTPServer).CoordinateNode},
{"/v1/event/list", (*HTTPServer).EventList},
{"/v1/health/node/" + t.Name(), (*HTTPServer).HealthNodeChecks},
{"/v1/health/checks/test", (*HTTPServer).HealthNodeChecks},
{"/v1/health/state/passing", (*HTTPServer).HealthChecksInState},
{"/v1/health/service/test", (*HTTPServer).HealthServiceNodes},
{"/v1/health/connect/test", (*HTTPServer).HealthConnectServiceNodes},
{"/v1/operator/raft/configuration", (*HTTPServer).OperatorRaftConfiguration},
// keyring endpoint has issues with returning errors if you haven't enabled encryption
// {"/v1/operator/keyring", (*HTTPServer).OperatorKeyringEndpoint},
{"/v1/operator/autopilot/configuration", (*HTTPServer).OperatorAutopilotConfiguration},
{"/v1/operator/autopilot/health", (*HTTPServer).OperatorServerHealth},
{"/v1/query", (*HTTPServer).PreparedQueryGeneral},
{"/v1/session/list", (*HTTPServer).SessionList},
{"/v1/status/leader", (*HTTPServer).StatusLeader},
{"/v1/status/peers", (*HTTPServer).StatusPeers},
}
a := NewTestAgent(t, t.Name(), TestACLConfig()+testAllowProxyConfig())
defer a.Shutdown()
// Register a service with a managed proxy
{
reg := &structs.ServiceDefinition{
ID: "test-id",
Name: "test",
Address: "127.0.0.1",
Port: 8000,
Check: structs.CheckType{
TTL: 15 * time.Second,
},
Connect: &structs.ServiceConnect{
Proxy: &structs.ServiceDefinitionConnectProxy{},
},
}
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg))
resp := httptest.NewRecorder()
_, err := a.srv.AgentRegisterService(resp, req)
require.NoError(t, err)
require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String())
}
// Get the proxy token from the agent directly, since there is no API.
proxy := a.State.Proxy("test-id-proxy")
require.NotNil(t, proxy)
token := proxy.ProxyToken
require.NotEmpty(t, token)
for _, check := range tests {
t.Run(fmt.Sprintf("GET(%s)", check.endpoint), func(t *testing.T) {
req, _ := http.NewRequest("GET", fmt.Sprintf("%s?token=%s", check.endpoint, token), nil)
resp := httptest.NewRecorder()
_, err := check.handler(a.srv, resp, req)
require.NoError(t, err)
})
}
}
func TestAllowedNets(t *testing.T) {
type testVal struct {
nets []string

View File

@ -3,7 +3,6 @@ package local
import (
"fmt"
"log"
"math/rand"
"reflect"
"strconv"
"strings"
@ -19,7 +18,6 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/types"
uuid "github.com/hashicorp/go-uuid"
)
const fullSyncReadMaxStale = 2 * time.Second
@ -33,8 +31,6 @@ type Config struct {
NodeID types.NodeID
NodeName string
TaggedAddresses map[string]string
ProxyBindMinPort int
ProxyBindMaxPort int
}
// ServiceState describes the state of a service record.
@ -127,32 +123,6 @@ type rpc interface {
RPC(method string, args interface{}, reply interface{}) error
}
// ManagedProxy represents the local state for a registered proxy instance.
type ManagedProxy struct {
Proxy *structs.ConnectManagedProxy
// ProxyToken is a special local-only security token that grants the bearer
// access to the proxy's config as well as allowing it to request certificates
// on behalf of the target service. Certain connect endpoints will validate
// against this token and if it matches will then use the target service's
// registration token to actually authenticate the upstream RPC on behalf of
// the service. This token is passed securely to the proxy process via ENV
// vars and should never be exposed any other way. Unmanaged proxies will
// never see this and need to use service-scoped ACL tokens distributed
// externally. It is persisted in the local state to allow authenticating
// running proxies after the agent restarts.
//
// TODO(banks): In theory we only need to persist this at all to _validate_
// which means we could keep only a hash in memory and on disk and only pass
// the actual token to the process on startup. That would require a bit of
// refactoring though to have the required interaction with the proxy manager.
ProxyToken string
// WatchCh is a close-only chan that is closed when the proxy is removed or
// updated.
WatchCh chan struct{}
}
// State is used to represent the node's services
// and checks. We use it to perform anti-entropy with the
// catalog representation
@ -201,42 +171,22 @@ type State struct {
// notifyHandlers is a map of registered channel listeners that are sent
// messages whenever state changes occur. For now these events only include
// service registration and deregistration since that is all that is needed
// but the same mechanism could be used for other state changes.
//
// Note that we haven't refactored managedProxyHandlers into this mechanism
// yet because that is soon to be deprecated and removed so it's easier to
// just leave them separate until managed proxies are removed entirely. Any
// future notifications should re-use this mechanism though.
// but the same mechanism could be used for other state changes. Any
// future notifications should re-use this mechanism.
notifyHandlers map[chan<- struct{}]struct{}
// managedProxies is a map of all managed connect proxies registered locally on
// this agent. This is NOT kept in sync with servers since it's agent-local
// config only. Proxy instances have separate service registrations in the
// services map above which are kept in sync via anti-entropy. Un-managed
// proxies (that registered themselves separately from the service
// registration) do not appear here as the agent doesn't need to manage their
// process nor config. The _do_ still exist in services above though as
// services with Kind == connect-proxy.
//
// managedProxyHandlers is a map of registered channel listeners that
// are sent a message each time a proxy changes via Add or RemoveProxy.
managedProxies map[string]*ManagedProxy
managedProxyHandlers map[chan<- struct{}]struct{}
}
// NewState creates a new local state for the agent.
func NewState(c Config, lg *log.Logger, tokens *token.Store) *State {
l := &State{
config: c,
logger: lg,
services: make(map[string]*ServiceState),
checks: make(map[types.CheckID]*CheckState),
checkAliases: make(map[string]map[types.CheckID]chan<- struct{}),
metadata: make(map[string]string),
tokens: tokens,
notifyHandlers: make(map[chan<- struct{}]struct{}),
managedProxies: make(map[string]*ManagedProxy),
managedProxyHandlers: make(map[chan<- struct{}]struct{}),
config: c,
logger: lg,
services: make(map[string]*ServiceState),
checks: make(map[types.CheckID]*CheckState),
checkAliases: make(map[string]map[types.CheckID]chan<- struct{}),
metadata: make(map[string]string),
tokens: tokens,
notifyHandlers: make(map[chan<- struct{}]struct{}),
}
l.SetDiscardCheckOutput(c.DiscardCheckOutput)
return l
@ -741,188 +691,6 @@ func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState {
return m
}
// AddProxy is used to add a connect proxy entry to the local state. This
// assumes the proxy's NodeService is already registered via Agent.AddService
// (since that has to do other book keeping). The token passed here is the ACL
// token the service used to register itself so must have write on service
// record. AddProxy returns the newly added proxy and an error.
//
// The restoredProxyToken argument should only be used when restoring proxy
// definitions from disk; new proxies must leave it blank to get a new token
// assigned. We need to restore from disk to enable to continue authenticating
// running proxies that already had that credential injected.
func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token,
restoredProxyToken string) (*ManagedProxy, error) {
if proxy == nil {
return nil, fmt.Errorf("no proxy")
}
// Lookup the local service
target := l.Service(proxy.TargetServiceID)
if target == nil {
return nil, fmt.Errorf("target service ID %s not registered",
proxy.TargetServiceID)
}
// Get bind info from config
cfg, err := proxy.ParseConfig()
if err != nil {
return nil, err
}
// Construct almost all of the NodeService that needs to be registered by the
// caller outside of the lock.
svc := &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: target.ID + "-proxy",
Service: target.Service + "-proxy",
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: target.Service,
LocalServiceAddress: cfg.LocalServiceAddress,
LocalServicePort: cfg.LocalServicePort,
},
Address: cfg.BindAddress,
Port: cfg.BindPort,
}
// Set default port now while the target is known
if svc.Proxy.LocalServicePort < 1 {
svc.Proxy.LocalServicePort = target.Port
}
// Lock now. We can't lock earlier as l.Service would deadlock and shouldn't
// anyway to minimize the critical section.
l.Lock()
defer l.Unlock()
pToken := restoredProxyToken
// Does this proxy instance already exist?
if existing, ok := l.managedProxies[svc.ID]; ok {
// Keep the existing proxy token so we don't have to restart proxy to
// re-inject token.
pToken = existing.ProxyToken
// If the user didn't explicitly change the port, use the old one instead of
// assigning new.
if svc.Port < 1 {
svc.Port = existing.Proxy.ProxyService.Port
}
} else if proxyService, ok := l.services[svc.ID]; ok {
// The proxy-service already exists so keep the port that got assigned. This
// happens on reload from disk since service definitions are reloaded first.
svc.Port = proxyService.Service.Port
}
// If this is a new instance, generate a token
if pToken == "" {
pToken, err = uuid.GenerateUUID()
if err != nil {
return nil, err
}
}
// Allocate port if needed (min and max inclusive).
rangeLen := l.config.ProxyBindMaxPort - l.config.ProxyBindMinPort + 1
if svc.Port < 1 && l.config.ProxyBindMinPort > 0 && rangeLen > 0 {
// This should be a really short list so don't bother optimizing lookup yet.
OUTER:
for _, offset := range rand.Perm(rangeLen) {
p := l.config.ProxyBindMinPort + offset
// See if this port was already allocated to another proxy
for _, other := range l.managedProxies {
if other.Proxy.ProxyService.Port == p {
// already taken, skip to next random pick in the range
continue OUTER
}
}
// We made it through all existing proxies without a match so claim this one
svc.Port = p
break
}
}
// If no ports left (or auto ports disabled) fail
if svc.Port < 1 {
return nil, fmt.Errorf("no port provided for proxy bind_port and none "+
" left in the allocated range [%d, %d]", l.config.ProxyBindMinPort,
l.config.ProxyBindMaxPort)
}
proxy.ProxyService = svc
// All set, add the proxy and return the service
if old, ok := l.managedProxies[svc.ID]; ok {
// Notify watchers of the existing proxy config that it's changing. Note
// this is safe here even before the map is updated since we still hold the
// state lock and the watcher can't re-read the new config until we return
// anyway.
close(old.WatchCh)
}
l.managedProxies[svc.ID] = &ManagedProxy{
Proxy: proxy,
ProxyToken: pToken,
WatchCh: make(chan struct{}),
}
// Notify
for ch := range l.managedProxyHandlers {
// Do not block
select {
case ch <- struct{}{}:
default:
}
}
// No need to trigger sync as proxy state is local only.
return l.managedProxies[svc.ID], nil
}
// RemoveProxy is used to remove a proxy entry from the local state.
// This returns the proxy that was removed.
func (l *State) RemoveProxy(id string) (*ManagedProxy, error) {
l.Lock()
defer l.Unlock()
p := l.managedProxies[id]
if p == nil {
return nil, fmt.Errorf("Proxy %s does not exist", id)
}
delete(l.managedProxies, id)
// Notify watchers of the existing proxy config that it's changed.
close(p.WatchCh)
// Notify
for ch := range l.managedProxyHandlers {
// Do not block
select {
case ch <- struct{}{}:
default:
}
}
// No need to trigger sync as proxy state is local only.
return p, nil
}
// Proxy returns the local proxy state.
func (l *State) Proxy(id string) *ManagedProxy {
l.RLock()
defer l.RUnlock()
return l.managedProxies[id]
}
// Proxies returns the locally registered proxies.
func (l *State) Proxies() map[string]*ManagedProxy {
l.RLock()
defer l.RUnlock()
m := make(map[string]*ManagedProxy)
for id, p := range l.managedProxies {
m[id] = p
}
return m
}
// broadcastUpdateLocked assumes l is locked and delivers an update to all
// registered watchers.
func (l *State) broadcastUpdateLocked() {
@ -958,31 +726,6 @@ func (l *State) StopNotify(ch chan<- struct{}) {
delete(l.notifyHandlers, ch)
}
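The generic StopNotify above is what remains once the proxy-specific variants below are deleted. A consumer sketch, assuming the existing Notify/StopNotify pair; because sends never block, a buffer of one is enough to coalesce bursts of changes:
// watchLocalState is a hypothetical watcher on local state changes.
func watchLocalState(l *State, stop <-chan struct{}) {
	ch := make(chan struct{}, 1) // size 1: a dropped send loses no data
	l.Notify(ch)
	defer l.StopNotify(ch)
	for {
		select {
		case <-ch:
			// state changed; re-read services/checks as needed
		case <-stop:
			return
		}
	}
}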
// NotifyProxy will register a channel to receive messages when the
// configuration or set of proxies changes. This will not block on
// channel send so ensure the channel has a buffer. Note that any buffer
// size is generally fine since actual data is not sent over the channel,
// so a dropped send due to a full buffer does not result in any loss of
// data. The fact that a buffer already contains a notification means that
// the receiver will still be notified that changes occurred.
//
// NOTE(mitchellh): This could be more generalized but for my use case I
// only needed proxy events. In the future if it were to be generalized I
// would add a new Notify method and remove the proxy-specific ones.
func (l *State) NotifyProxy(ch chan<- struct{}) {
l.Lock()
defer l.Unlock()
l.managedProxyHandlers[ch] = struct{}{}
}
// StopNotifyProxy will deregister a channel receiving proxy notifications.
// Pair this with all calls to NotifyProxy to clean up state.
func (l *State) StopNotifyProxy(ch chan<- struct{}) {
l.Lock()
defer l.Unlock()
delete(l.managedProxyHandlers, ch)
}
// Metadata returns the local node metadata fields that the
// agent is aware of and are being kept in sync with the server
func (l *State) Metadata() map[string]string {

View File

@ -11,8 +11,6 @@ import (
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/local"
@ -1959,255 +1957,6 @@ func TestState_Notify(t *testing.T) {
drainCh(notifyCh)
}
func TestStateProxyManagement(t *testing.T) {
t.Parallel()
state := local.NewState(local.Config{
ProxyBindMinPort: 20000,
ProxyBindMaxPort: 20001,
}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
// Stub state syncing
state.TriggerSyncChanges = func() {}
p1 := structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
TargetServiceID: "web",
}
require := require.New(t)
assert := assert.New(t)
_, err := state.AddProxy(&p1, "fake-token", "")
require.Error(err, "should fail as the target service isn't registered")
// Sanity check done, lets add a couple of target services to the state
err = state.AddService(&structs.NodeService{
Service: "web",
}, "fake-token-web")
require.NoError(err)
err = state.AddService(&structs.NodeService{
Service: "cache",
}, "fake-token-cache")
require.NoError(err)
require.NoError(err)
err = state.AddService(&structs.NodeService{
Service: "db",
}, "fake-token-db")
require.NoError(err)
// Should work now
pstate, err := state.AddProxy(&p1, "fake-token", "")
require.NoError(err)
svc := pstate.Proxy.ProxyService
assert.Equal("web-proxy", svc.ID)
assert.Equal("web-proxy", svc.Service)
assert.Equal(structs.ServiceKindConnectProxy, svc.Kind)
assert.Equal("web", svc.Proxy.DestinationServiceName)
assert.Equal("", svc.Address, "should have empty address by default")
// Port is non-deterministic but could be either of 20000 or 20001
assert.Contains([]int{20000, 20001}, svc.Port)
{
// Re-registering same proxy again should not pick a random port but re-use
// the assigned one. It should also keep the same proxy token since we don't
// want to force restart for config change.
pstateDup, err := state.AddProxy(&p1, "fake-token", "")
require.NoError(err)
svcDup := pstateDup.Proxy.ProxyService
assert.Equal("web-proxy", svcDup.ID)
assert.Equal("web-proxy", svcDup.Service)
assert.Equal(structs.ServiceKindConnectProxy, svcDup.Kind)
assert.Equal("web", svcDup.Proxy.DestinationServiceName)
assert.Equal("", svcDup.Address, "should have empty address by default")
// Port must be same as before
assert.Equal(svc.Port, svcDup.Port)
// Same ProxyToken
assert.Equal(pstate.ProxyToken, pstateDup.ProxyToken)
}
// Let's register a notifier now
notifyCh := make(chan struct{}, 1)
state.NotifyProxy(notifyCh)
defer state.StopNotifyProxy(notifyCh)
assert.Empty(notifyCh)
drainCh(notifyCh)
// Second proxy should claim other port
p2 := p1
p2.TargetServiceID = "cache"
pstate2, err := state.AddProxy(&p2, "fake-token", "")
require.NoError(err)
svc2 := pstate2.Proxy.ProxyService
assert.Contains([]int{20000, 20001}, svc2.Port)
assert.NotEqual(svc.Port, svc2.Port)
// Should have a notification
assert.NotEmpty(notifyCh)
drainCh(notifyCh)
// Store this for later
p2token := state.Proxy(svc2.ID).ProxyToken
// Third proxy should fail as all ports are used
p3 := p1
p3.TargetServiceID = "db"
_, err = state.AddProxy(&p3, "fake-token", "")
require.Error(err)
// Should have a notification but we'll do nothing so that the next
// receive should block (we set cap == 1 above)
// But if we set a port explicitly it should be OK
p3.Config = map[string]interface{}{
"bind_port": 1234,
"bind_address": "0.0.0.0",
}
pstate3, err := state.AddProxy(&p3, "fake-token", "")
require.NoError(err)
svc3 := pstate3.Proxy.ProxyService
require.Equal("0.0.0.0", svc3.Address)
require.Equal(1234, svc3.Port)
// Should have a notification
assert.NotEmpty(notifyCh)
drainCh(notifyCh)
// Update config of an already registered proxy should work
p3updated := p3
p3updated.Config["foo"] = "bar"
// Setup multiple watchers who should all witness the change
gotP3 := state.Proxy(svc3.ID)
require.NotNil(gotP3)
var ws memdb.WatchSet
ws.Add(gotP3.WatchCh)
pstate3, err = state.AddProxy(&p3updated, "fake-token", "")
require.NoError(err)
svc3 = pstate3.Proxy.ProxyService
require.Equal("0.0.0.0", svc3.Address)
require.Equal(1234, svc3.Port)
gotProxy3 := state.Proxy(svc3.ID)
require.NotNil(gotProxy3)
require.Equal(p3updated.Config, gotProxy3.Proxy.Config)
assert.False(ws.Watch(time.After(500*time.Millisecond)),
"watch should have fired so ws.Watch should not timeout")
drainCh(notifyCh)
// Remove one of the auto-assigned proxies
_, err = state.RemoveProxy(svc2.ID)
require.NoError(err)
// Should have a notification
assert.NotEmpty(notifyCh)
drainCh(notifyCh)
// Should be able to create a new proxy for that service with the port (it
// should have been "freed").
p4 := p2
pstate4, err := state.AddProxy(&p4, "fake-token", "")
require.NoError(err)
svc4 := pstate4.Proxy.ProxyService
assert.Contains([]int{20000, 20001}, svc2.Port)
assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed")
// Remove a proxy that doesn't exist should error
_, err = state.RemoveProxy("nope")
require.Error(err)
assert.Equal(&p4, state.Proxy(p4.ProxyService.ID).Proxy,
"should fetch the right proxy details")
assert.Nil(state.Proxy("nope"))
proxies := state.Proxies()
assert.Len(proxies, 3)
assert.Equal(&p1, proxies[svc.ID].Proxy)
assert.Equal(&p4, proxies[svc4.ID].Proxy)
assert.Equal(&p3, proxies[svc3.ID].Proxy)
tokens := make([]string, 4)
tokens[0] = state.Proxy(svc.ID).ProxyToken
// p2 not registered anymore but lets make sure p4 got a new token when it
// re-registered with same ID.
tokens[1] = p2token
tokens[2] = state.Proxy(svc2.ID).ProxyToken
tokens[3] = state.Proxy(svc3.ID).ProxyToken
// Quick check all are distinct
for i := 0; i < len(tokens)-1; i++ {
assert.Len(tokens[i], 36) // Sanity check for UUIDish thing.
for j := i + 1; j < len(tokens); j++ {
assert.NotEqual(tokens[i], tokens[j], "tokens for proxy %d and %d match",
i+1, j+1)
}
}
}
// Tests the logic for retaining tokens and ports through restore (i.e.
// proxy-service already restored and token passed in externally)
func TestStateProxyRestore(t *testing.T) {
t.Parallel()
state := local.NewState(local.Config{
// Wide random range to make it very unlikely to pass by chance
ProxyBindMinPort: 10000,
ProxyBindMaxPort: 20000,
}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
// Stub state syncing
state.TriggerSyncChanges = func() {}
webSvc := structs.NodeService{
Service: "web",
}
p1 := structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: []string{"consul", "connect", "proxy"},
TargetServiceID: "web",
}
p2 := p1
require := require.New(t)
assert := assert.New(t)
// Add a target service
require.NoError(state.AddService(&webSvc, "fake-token-web"))
// Add the proxy for first time to get the proper service definition to
// register
pstate, err := state.AddProxy(&p1, "fake-token", "")
require.NoError(err)
// Now start again with a brand new state
state2 := local.NewState(local.Config{
// Wide random range to make it very unlikely to pass by chance
ProxyBindMinPort: 10000,
ProxyBindMaxPort: 20000,
}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
// Stub state syncing
state2.TriggerSyncChanges = func() {}
// Register the target service
require.NoError(state2.AddService(&webSvc, "fake-token-web"))
// "Restore" the proxy service
require.NoError(state.AddService(p1.ProxyService, "fake-token-web"))
// Now we can AddProxy with the "restored" token
pstate2, err := state.AddProxy(&p2, "fake-token", pstate.ProxyToken)
require.NoError(err)
// Check it still has the same port and token as before
assert.Equal(pstate.ProxyToken, pstate2.ProxyToken)
assert.Equal(p1.ProxyService.Port, p2.ProxyService.Port)
}
// Test that alias check is updated after AddCheck, UpdateCheck, and RemoveCheck for the same service id
func TestAliasNotifications_local(t *testing.T) {
t.Parallel()

View File

@ -10,10 +10,7 @@ import (
// TestState returns a configured *State for testing.
func TestState(t testing.T) *State {
result := NewState(Config{
ProxyBindMinPort: 20000,
ProxyBindMaxPort: 20500,
}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
result := NewState(Config{}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
result.TriggerSyncChanges = func() {}
return result
}

View File

@ -139,9 +139,8 @@ func (m *Manager) syncState() {
// default to the port of the sidecar service, but only if it's already
// registered and once we get past here, we don't have enough context to
// know that so we'd need to set it here if not during registration of the
// proxy service. Sidecar Service and managed proxies in the interim can
// do that, but we should validate more generally that that is always
// true.
// proxy service. Sidecar Service in the interim can do that, but we should
// validate more generally that that is always true.
err := m.ensureProxyServiceLocked(svc, m.State.ServiceToken(svcID))
if err != nil {
m.Logger.Printf("[ERR] failed to watch proxy service %s: %s", svc.ID,

View File

@ -1,469 +0,0 @@
package proxyprocess
import (
"fmt"
"log"
"os"
"os/exec"
"reflect"
"strconv"
"sync"
"time"
"github.com/hashicorp/consul/lib/file"
"github.com/mitchellh/mapstructure"
)
// Constants related to restart timers with the daemon mode proxies. At some
// point we will probably want to expose these knobs to an end user, but
// reasonable defaults are chosen.
const (
DaemonRestartHealthy = 10 * time.Second // time before considering healthy
DaemonRestartBackoffMin = 3 // 3 attempts before backing off
DaemonRestartMaxWait = 1 * time.Minute // maximum backoff wait time
)
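// For illustration (derived from keepAlive below, not a separate mechanism):
// the first DaemonRestartBackoffMin restarts happen immediately; after that
// the wait doubles per attempt, e.g. attempt 4 waits 2s, attempt 5 waits 4s,
// attempt 6 waits 8s, and so on until capped at DaemonRestartMaxWait. Any run
// that survives DaemonRestartHealthy resets the attempt counter.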
// Daemon is a long-running proxy process. It is expected to keep running
// and to use blocking queries to detect changes in configuration, certs,
// and more.
//
// Consul will ensure that if the daemon crashes, that it is restarted.
type Daemon struct {
// Command is the command to execute to start this daemon. This must
// be a Cmd that isn't yet started.
Command *exec.Cmd
// ProxyID is the ID of the proxy service. This is required for API
// requests (along with the token) and is passed via env var.
ProxyID string
// ProxyToken is the special local-only ACL token that allows a proxy
// to communicate to the Connect-specific endpoints.
ProxyToken string
// Logger is where logs will be sent around the management of this
// daemon. The actual logs for the daemon itself will be sent to
// a file.
Logger *log.Logger
// PidPath is the path where a pid file will be created storing the
// pid of the active process. If this is empty then a pid-file won't
// be created. Under erroneous conditions, the pid file may not be
// created but the error will be logged to the Logger.
PidPath string
// For tests, they can set this to change the default duration to wait
// for a graceful quit.
gracefulWait time.Duration
// process is the started process
lock sync.Mutex
stopped bool
stopCh chan struct{}
exitedCh chan struct{}
process *os.Process
}
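// A minimal usage sketch (the command path, ID, and token are hypothetical):
//
//	d := &Daemon{
//		Command:    exec.Command("/usr/local/bin/my-proxy"),
//		ProxyID:    "web-proxy",
//		ProxyToken: token,
//		Logger:     log.New(os.Stderr, "", log.LstdFlags),
//	}
//	if err := d.Start(); err != nil {
//		// handle error
//	}
//	defer d.Stop()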
// Start starts the daemon and keeps it running.
//
// This function returns after the process is successfully started.
func (p *Daemon) Start() error {
p.lock.Lock()
defer p.lock.Unlock()
// A stopped proxy cannot be restarted
if p.stopped {
return fmt.Errorf("stopped")
}
// If we're already running, that is okay
if p.process != nil {
return nil
}
// Setup our stop channel
stopCh := make(chan struct{})
exitedCh := make(chan struct{})
p.stopCh = stopCh
p.exitedCh = exitedCh
// Start the loop.
go p.keepAlive(stopCh, exitedCh)
return nil
}
// keepAlive starts and keeps the configured process alive until it
// is stopped via Stop.
func (p *Daemon) keepAlive(stopCh <-chan struct{}, exitedCh chan<- struct{}) {
defer close(exitedCh)
p.lock.Lock()
process := p.process
p.lock.Unlock()
// attemptsDeadline is the time at which we consider the daemon to have
// been alive long enough that we can reset the attempt counter.
//
// attempts keeps track of the number of restart attempts we've had and
// is used to calculate the wait time using an exponential backoff.
var attemptsDeadline time.Time
var attempts uint32
// Assume the process is adopted; we reset this when we start a new process
// ourselves below and use it to decide on a strategy for waiting.
adopted := true
for {
if process == nil {
// If we're past the attempt deadline then reset the attempts
if !attemptsDeadline.IsZero() && time.Now().After(attemptsDeadline) {
attempts = 0
}
// Set ourselves a deadline - we have to make it at least this long before
// we come around the loop to consider it to have been a "successful"
// daemon startup and reset the counter above. Note that if the daemon
// fails before this, we reset the deadline to zero below so that backoff
// sleeps in the loop don't count as "success" time.
attemptsDeadline = time.Now().Add(DaemonRestartHealthy)
attempts++
// Calculate the exponential backoff and wait if we have to
if attempts > DaemonRestartBackoffMin {
exponent := (attempts - DaemonRestartBackoffMin)
if exponent > 31 {
exponent = 31
}
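// Note: 1 below is an untyped constant, so in this non-constant shift it
// takes the type it would have if the shift were replaced by the constant
// alone, i.e. time.Duration via the multiplication with time.Second.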
waitTime := (1 << exponent) * time.Second
if waitTime > DaemonRestartMaxWait {
waitTime = DaemonRestartMaxWait
}
if waitTime > 0 {
// If we are waiting, reset the success deadline so we don't
// accidentally interpret backoff sleep as successful runtime.
attemptsDeadline = time.Time{}
p.Logger.Printf(
"[WARN] agent/proxy: waiting %s before restarting daemon",
waitTime)
timer := time.NewTimer(waitTime)
select {
case <-timer.C:
// Timer is up, good!
case <-stopCh:
// During our backoff wait, we've been signaled to
// quit, so just quit.
timer.Stop()
return
}
}
}
p.lock.Lock()
// If we gracefully stopped then don't restart.
if p.stopped {
p.lock.Unlock()
return
}
// Process isn't started currently. We're restarting. Start it
// and save the process if we have it.
var err error
process, err = p.start()
if err == nil {
p.process = process
adopted = false
}
p.lock.Unlock()
if err != nil {
p.Logger.Printf("[ERR] agent/proxy: error restarting daemon: %s", err)
continue
}
}
var ps *os.ProcessState
var err error
if adopted {
// assign to err outside scope
_, err = findProcess(process.Pid)
if err == nil {
// Process appears to be running still, wait a bit before we poll again.
// We want a busy loop, but not too busy. 1 second between detecting a
// process death seems reasonable.
//
// SUBTLETY: we must NOT select on stopCh here since the Stop function
// assumes that as soon as this method returns and closes exitedCh, that
// the process is no longer running. If we are polling then we don't
// know that is true until we've polled again so we have to keep polling
// until the process goes away even if we know the Daemon is stopping.
time.Sleep(1 * time.Second)
// Restart the loop, process is still set so we effectively jump back to
// the findProcess call above.
continue
}
} else {
// Wait for child to exit
ps, err = process.Wait()
}
// Process exited somehow.
process = nil
if err != nil {
p.Logger.Printf("[INFO] agent/proxy: daemon exited with error: %s", err)
} else if ps != nil && !ps.Exited() {
p.Logger.Printf("[INFO] agent/proxy: daemon left running")
} else if status, ok := exitStatus(ps); ok {
p.Logger.Printf("[INFO] agent/proxy: daemon exited with exit code: %d", status)
}
}
}
// start starts and returns the process. This will create a copy of the
// configured *exec.Cmd with the modifications documented on Daemon
// such as setting the proxy token environmental variable.
func (p *Daemon) start() (*os.Process, error) {
cmd := *p.Command
// Add the proxy token to the environment. We first copy the env because it is
// a slice and therefore the "copy" above will only copy the slice reference.
// We allocate an exactly sized slice.
//
// Note that anything we add to the Env here is NOT persisted in the snapshot
// which only looks at p.Command.Env so it needs to be reconstructible exactly
// from data in the snapshot otherwise.
cmd.Env = make([]string, len(p.Command.Env), len(p.Command.Env)+2)
copy(cmd.Env, p.Command.Env)
cmd.Env = append(cmd.Env,
fmt.Sprintf("%s=%s", EnvProxyID, p.ProxyID),
fmt.Sprintf("%s=%s", EnvProxyToken, p.ProxyToken))
// Args must always contain a 0 entry which is usually the executed binary.
// To be safe and a bit more robust we default this, but only to prevent
// a panic below.
if len(cmd.Args) == 0 {
cmd.Args = []string{cmd.Path}
}
// Perform system-specific setup. In particular, Unix-like systems
// should set sid so that killing the agent doesn't kill the daemon.
configureDaemon(&cmd)
// Start it
p.Logger.Printf("[DEBUG] agent/proxy: starting proxy: %q %#v", cmd.Path, cmd.Args[1:])
if err := cmd.Start(); err != nil {
return nil, err
}
// Write the pid file. This might error and that's okay.
if p.PidPath != "" {
pid := strconv.FormatInt(int64(cmd.Process.Pid), 10)
if err := file.WriteAtomic(p.PidPath, []byte(pid)); err != nil {
p.Logger.Printf(
"[DEBUG] agent/proxy: error writing pid file %q: %s",
p.PidPath, err)
}
}
return cmd.Process, nil
}
// Stop stops the daemon.
//
// This will attempt a graceful stop (SIGINT) before force killing the
// process (SIGKILL). In either case, the process won't be automatically
// restarted unless Start is called again.
//
// This is safe to call multiple times. If the daemon is already stopped,
// then this returns no error.
func (p *Daemon) Stop() error {
p.lock.Lock()
// If we're already stopped or never started, then no problem.
if p.stopped || p.process == nil {
// In the case we never even started, calling Stop makes it so
// that we can't ever start in the future, either, so mark this.
p.stopped = true
p.lock.Unlock()
return nil
}
// Note that we've stopped
p.stopped = true
close(p.stopCh)
process := p.process
p.lock.Unlock()
gracefulWait := p.gracefulWait
if gracefulWait == 0 {
gracefulWait = 5 * time.Second
}
// Defer removing the pid file. Even under error conditions we
// delete the pid file since Stop means that the manager is no
// longer managing this proxy and therefore nothing else will ever
// clean it up.
if p.PidPath != "" {
defer func() {
if err := os.Remove(p.PidPath); err != nil && !os.IsNotExist(err) {
p.Logger.Printf(
"[DEBUG] agent/proxy: error removing pid file %q: %s",
p.PidPath, err)
}
}()
}
// First, try a graceful stop
err := process.Signal(os.Interrupt)
if err == nil {
select {
case <-p.exitedCh:
// Success!
return nil
case <-time.After(gracefulWait):
// Interrupt didn't work
p.Logger.Printf("[DEBUG] agent/proxy: graceful wait of %s passed, "+
"killing", gracefulWait)
}
} else if isProcessAlreadyFinishedErr(err) {
// This can happen due to races between signals and polling.
return nil
} else {
p.Logger.Printf("[DEBUG] agent/proxy: sigint failed, killing: %s", err)
}
// Graceful didn't work (e.g. on windows where SIGINT isn't implemented),
// forcibly kill
err = process.Kill()
if err != nil && isProcessAlreadyFinishedErr(err) {
return nil
}
return err
}
// Close implements Proxy by stopping the run loop but not killing the process.
// Once Close is called, Stop has no effect.
func (p *Daemon) Close() error {
p.lock.Lock()
defer p.lock.Unlock()
// If we're already stopped or never started, then no problem.
if p.stopped || p.process == nil {
p.stopped = true
return nil
}
// Note that we've stopped
p.stopped = true
close(p.stopCh)
return nil
}
// Equal implements Proxy to check for equality.
func (p *Daemon) Equal(raw Proxy) bool {
p2, ok := raw.(*Daemon)
if !ok {
return false
}
// We compare equality on a subset of the command configuration
return p.ProxyToken == p2.ProxyToken &&
p.ProxyID == p2.ProxyID &&
p.Command.Path == p2.Command.Path &&
p.Command.Dir == p2.Command.Dir &&
reflect.DeepEqual(p.Command.Args, p2.Command.Args) &&
reflect.DeepEqual(p.Command.Env, p2.Command.Env)
}
// MarshalSnapshot implements Proxy
func (p *Daemon) MarshalSnapshot() map[string]interface{} {
p.lock.Lock()
defer p.lock.Unlock()
// If we're stopped or have no process, then nothing to snapshot.
if p.stopped || p.process == nil {
return nil
}
return map[string]interface{}{
"Pid": p.process.Pid,
"CommandPath": p.Command.Path,
"CommandArgs": p.Command.Args,
"CommandDir": p.Command.Dir,
"CommandEnv": p.Command.Env,
"ProxyToken": p.ProxyToken,
"ProxyID": p.ProxyID,
}
}
// UnmarshalSnapshot implements Proxy
func (p *Daemon) UnmarshalSnapshot(m map[string]interface{}) error {
var s daemonSnapshot
if err := mapstructure.Decode(m, &s); err != nil {
return err
}
p.lock.Lock()
defer p.lock.Unlock()
// Set the basic fields
p.ProxyToken = s.ProxyToken
p.ProxyID = s.ProxyID
p.Command = &exec.Cmd{
Path: s.CommandPath,
Args: s.CommandArgs,
Dir: s.CommandDir,
Env: s.CommandEnv,
}
// FindProcess on many systems returns no error even if the process
// is now dead. We perform an extra check that the process is alive.
proc, err := findProcess(s.Pid)
if err != nil {
return err
}
// "Start it"
stopCh := make(chan struct{})
exitedCh := make(chan struct{})
p.stopCh = stopCh
p.exitedCh = exitedCh
p.process = proc
go p.keepAlive(stopCh, exitedCh)
return nil
}
// daemonSnapshot is the structure of the marshaled data for snapshotting.
//
// Note the ProxyID is also stored directly within the manager snapshot and
// restored automatically; it is duplicated here so Equal can compare it
// after a restore.
type daemonSnapshot struct {
// Pid of the process. This is the only value actually required to
// regain management control. The remaining values are for Equal.
Pid int
// Command information
CommandPath string
CommandArgs []string
CommandDir string
CommandEnv []string
// NOTE(mitchellh): longer term there are discussions/plans to only
// store the hash of the token but for now we need the full token in
// case the process dies and has to be restarted.
ProxyToken string
ProxyID string
}
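// For illustration, a marshaled snapshot decodes from a map roughly like
// this (all values hypothetical):
//
//	map[string]interface{}{
//		"Pid":         12345,
//		"CommandPath": "/usr/local/bin/my-proxy",
//		"CommandArgs": []string{"my-proxy", "-config", "c.json"},
//		"CommandDir":  "",
//		"CommandEnv":  []string{"PATH=/usr/bin"},
//		"ProxyToken":  "<uuid>",
//		"ProxyID":     "web-proxy",
//	}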

View File

@ -1,750 +0,0 @@
package proxyprocess
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"testing"
"time"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
)
func TestDaemon_impl(t *testing.T) {
var _ Proxy = new(Daemon)
}
func TestDaemonStartStop(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
uuid, err := uuid.GenerateUUID()
require.NoError(err)
cmd, destroy := helperProcess("start-stop", path)
defer destroy()
d := &Daemon{
Command: cmd,
ProxyID: "tubes",
ProxyToken: uuid,
Logger: testLogger,
}
require.NoError(d.Start())
defer d.Stop()
// Wait for the file to exist
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Verify that the contents of the file is the token. This verifies
// that we properly passed the token as an env var.
data, err := ioutil.ReadFile(path)
require.NoError(err)
require.Equal("tubes:"+uuid, string(data))
// Stop the process
require.NoError(d.Stop())
// File should no longer exist.
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return
}
// err might be nil here but that's okay
r.Fatalf("should not exist: %s", err)
})
}
func TestDaemonRestart(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("restart", path)
defer destroy()
d := &Daemon{
Command: cmd,
Logger: testLogger,
}
require.NoError(d.Start())
defer d.Stop()
// Wait for the file to exist. We save the func so we can reuse the test.
waitFile := func() {
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
}
waitFile()
// Delete the file
require.NoError(os.Remove(path))
// File should re-appear because the process is restarted
waitFile()
}
func TestDaemonLaunchesNewProcessGroup(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
pidPath := filepath.Join(td, "child.pid")
// Start the parent process wrapping a start-stop test. The parent is acting
// as our "agent". We need an extra indirection to be able to kill the "agent"
// and still be running the test process.
parentCmd, destroy := helperProcess("parent", pidPath, "start-stop", path)
defer destroy()
// We MUST run this as a separate process group otherwise the Kill below will
// kill this test process (and possibly your shell/editor that launched it!)
parentCmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
require.NoError(parentCmd.Start())
// Wait for the pid file to exist so we know parent is running
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(pidPath)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// And wait for the actual file to be sure the child is running (it should be
// since parent doesn't write PID until child starts but the child might not
// have completed the write to disk yet which causes flakiness below).
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Get the child PID
bs, err := ioutil.ReadFile(pidPath)
require.NoError(err)
pid, err := strconv.Atoi(string(bs))
require.NoError(err)
proc, err := os.FindProcess(pid)
require.NoError(err)
// Always cleanup child process after
defer func() {
if proc != nil {
proc.Kill()
}
}()
// Now kill the parent's whole process group and wait for it
pgid, err := syscall.Getpgid(parentCmd.Process.Pid)
require.NoError(err)
// Yep, the minus PGid is how you kill a whole process group in unix (see
// kill(2))... no idea how this works on windows. We send TERM, not KILL,
// since we rely on the child catching the signal and deleting its file to
// detect correct behavior.
require.NoError(syscall.Kill(-pgid, syscall.SIGTERM))
_, err = parentCmd.Process.Wait()
require.NoError(err)
// The child should still be running so file should still be there
_, err = os.Stat(path)
require.NoError(err, "child should still be running")
// TEST PART 2 - verify that adopting an existing process works and picks up
// monitoring even though it's not a child. We can't do this accurately with
// Restart test since even if we create a new `Daemon` object the test process
// is still the parent. We need the indirection of the `parent` test helper to
// actually verify "adoption" on restart works.
// Start a new parent that will "adopt" the existing child even though it will
// not be an actual child process.
fosterCmd, destroy := helperProcess("parent", pidPath, "start-stop", path)
defer destroy()
// Don't care about it being in the same process group this time as we will
// just kill it normally.
require.NoError(fosterCmd.Start())
defer func() {
// Clean up the daemon and wait for it to prevent it becoming a zombie.
fosterCmd.Process.Kill()
fosterCmd.Wait()
}()
// The child should still be running so file should still be there
_, err = os.Stat(path)
require.NoError(err, "child should still be running")
{
// Get the child PID - it shouldn't have changed and should be running
bs2, err := ioutil.ReadFile(pidPath)
require.NoError(err)
pid2, err := strconv.Atoi(string(bs2))
require.NoError(err)
// Defer a cleanup (til end of test function)
proc, err := os.FindProcess(pid)
require.NoError(err)
defer func() { proc.Kill() }()
require.Equal(pid, pid2)
t.Logf("Child PID was %d and still %d", pid, pid2)
}
// Now kill the child directly; the Daemon should still restart it
require.NoError(proc.Kill())
proc = nil
retry.Run(t, func(r *retry.R) {
// Get the child PID - it should have changed
bs, err := ioutil.ReadFile(pidPath)
r.Check(err)
newPid, err := strconv.Atoi(string(bs))
r.Check(err)
if newPid == pid {
r.Fatalf("Child PID file not changed, Daemon not restarting it")
}
t.Logf("Child PID was %d and is now %d", pid, newPid)
})
// I had to run through this test in debugger a lot of times checking ps state
// by hand at different points to convince myself it was doing the right
// thing. It doesn't help that with verbose logs on it seems that the stdio
// from the `parent` process can sometimes miss lines out due to timing. For
// example the `[INFO] agent/proxy: daemon exited...` log from Daemon that
// indicates that the child was detected to have failed and is restarting is
// never output on my Mac at full speed. But if I run in debugger and have it
// pause at the step after the child is killed above, then it shows. The
// `[DEBUG] agent/proxy: starting proxy:` for the restart does always come
// through though which is odd. I assume this is some odd quirk of timing
// between processes and stdio or something but it makes debugging this stuff
// even harder!
// Let defer clean up the child process(es)
// Get the NEW child PID
bs, err = ioutil.ReadFile(pidPath)
require.NoError(err)
pid, err = strconv.Atoi(string(bs))
require.NoError(err)
proc2, err := os.FindProcess(pid)
require.NoError(err)
// Always cleanup child process after
defer func() {
if proc2 != nil {
proc2.Kill()
}
}()
}
func TestDaemonStop_kill(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("stop-kill", path)
defer destroy()
d := &Daemon{
Command: cmd,
ProxyToken: "hello",
Logger: testLogger,
gracefulWait: 200 * time.Millisecond,
}
require.NoError(d.Start())
// Wait for the file to exist
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Stop the process
require.NoError(d.Stop())
// Stat the file so that we can get the mtime
fi, err := os.Stat(path)
require.NoError(err)
mtime := fi.ModTime()
// The mtime shouldn't change
time.Sleep(100 * time.Millisecond)
fi, err = os.Stat(path)
require.NoError(err)
require.Equal(mtime, fi.ModTime())
}
func TestDaemonStop_killAdopted(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
// In this test we want to ensure that graceful/ungraceful stop works with
// processes that were adopted by the current process but not started by it
// (i.e. we have to poll them, not use Wait).
//
// We could use `parent` indirection to get a child that is actually not
// started by this process but that's a lot of hoops to jump through on top of
// an already complex multi-process test case.
//
// For now we rely on an implementation detail of Daemon which is potentially
// brittle but beats lots of extra complexity here. Currently, if
// Daemon.process is non-nil, the keepAlive loop will explicitly assume it's
// not a child and so will use polling to monitor it. If we ever change that
// it might invalidate this test and we would either need more indirection
// here, or an alternative explicit signal on Daemon like Daemon.forcePoll to
// ensure we are exercising that code path.
// Start the "child" process
childCmd, destroy := helperProcess("stop-kill", path)
defer destroy()
require.NoError(childCmd.Start())
go func() { childCmd.Wait() }() // Prevent it becoming a zombie when killed
defer func() { childCmd.Process.Kill() }()
// Create the Daemon
cmd, destroy := helperProcess("stop-kill", path)
defer destroy()
d := &Daemon{
Command: cmd,
ProxyToken: "hello",
Logger: testLogger,
gracefulWait: 200 * time.Millisecond,
// Can't just set process as it will bypass initializing stopCh etc.
}
// Adopt the pid from a fake state snapshot (this correctly initializes Daemon
// for adoption)
fakeSnap := map[string]interface{}{
"Pid": childCmd.Process.Pid,
"CommandPath": childCmd.Path,
"CommandArgs": childCmd.Args,
"CommandDir": childCmd.Dir,
"CommandEnv": childCmd.Env,
"ProxyToken": d.ProxyToken,
}
require.NoError(d.UnmarshalSnapshot(fakeSnap))
require.NoError(d.Start())
// Wait for the file to exist (child was already running so this doesn't
// guarantee that Daemon is in "polling" state)
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Stop the process
require.NoError(d.Stop())
// Stat the file so that we can get the mtime
fi, err := os.Stat(path)
require.NoError(err)
mtime := fi.ModTime()
// The mtime shouldn't change
time.Sleep(100 * time.Millisecond)
fi, err = os.Stat(path)
require.NoError(err)
require.Equal(mtime, fi.ModTime())
}
func TestDaemonStart_pidFile(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
pidPath := filepath.Join(td, "pid")
uuid, err := uuid.GenerateUUID()
require.NoError(err)
cmd, destroy := helperProcess("start-once", path)
defer destroy()
d := &Daemon{
Command: cmd,
ProxyToken: uuid,
Logger: testLogger,
PidPath: pidPath,
}
require.NoError(d.Start())
defer d.Stop()
// Wait for the file to exist
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(pidPath)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Check the pid file
pidRaw, err := ioutil.ReadFile(pidPath)
require.NoError(err)
require.NotEmpty(pidRaw)
// Stop
require.NoError(d.Stop())
// Pid file should be gone
_, err = os.Stat(pidPath)
require.True(os.IsNotExist(err))
}
// Verify the pid file changes on restart
func TestDaemonRestart_pidFile(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
pidPath := filepath.Join(td, "pid")
cmd, destroy := helperProcess("restart", path)
defer destroy()
d := &Daemon{
Command: cmd,
Logger: testLogger,
PidPath: pidPath,
}
require.NoError(d.Start())
defer d.Stop()
// Wait for the file to exist. We save the func so we can reuse the test.
waitFile := func(path string) {
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
}
waitFile(path)
waitFile(pidPath)
// Check the pid file
pidRaw, err := ioutil.ReadFile(pidPath)
require.NoError(err)
require.NotEmpty(pidRaw)
// Delete the file
require.NoError(os.Remove(pidPath))
require.NoError(os.Remove(path))
// File should re-appear because the process is restarted
waitFile(path)
waitFile(pidPath)
// Check the pid file; it should not equal the previous pid
pidRaw2, err := ioutil.ReadFile(pidPath)
require.NoError(err)
require.NotEmpty(pidRaw2)
require.NotEqual(pidRaw, pidRaw2)
}
func TestDaemonEqual(t *testing.T) {
cases := []struct {
Name string
D1, D2 Proxy
Expected bool
}{
{
"Different type",
&Daemon{
Command: &exec.Cmd{},
},
&Noop{},
false,
},
{
"Nil",
&Daemon{
Command: &exec.Cmd{},
},
nil,
false,
},
{
"Equal",
&Daemon{
Command: &exec.Cmd{},
},
&Daemon{
Command: &exec.Cmd{},
},
true,
},
{
"Different proxy ID",
&Daemon{
Command: &exec.Cmd{Path: "/foo"},
ProxyID: "web",
},
&Daemon{
Command: &exec.Cmd{Path: "/foo"},
ProxyID: "db",
},
false,
},
{
"Different path",
&Daemon{
Command: &exec.Cmd{Path: "/foo"},
},
&Daemon{
Command: &exec.Cmd{Path: "/bar"},
},
false,
},
{
"Different dir",
&Daemon{
Command: &exec.Cmd{Dir: "/foo"},
},
&Daemon{
Command: &exec.Cmd{Dir: "/bar"},
},
false,
},
{
"Different args",
&Daemon{
Command: &exec.Cmd{Args: []string{"foo"}},
},
&Daemon{
Command: &exec.Cmd{Args: []string{"bar"}},
},
false,
},
{
"Different token",
&Daemon{
Command: &exec.Cmd{},
ProxyToken: "one",
},
&Daemon{
Command: &exec.Cmd{},
ProxyToken: "two",
},
false,
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
actual := tc.D1.Equal(tc.D2)
require.Equal(t, tc.Expected, actual)
})
}
}
func TestDaemonMarshalSnapshot(t *testing.T) {
cases := []struct {
Name string
Proxy Proxy
Expected map[string]interface{}
}{
{
"stopped daemon",
&Daemon{
Command: &exec.Cmd{Path: "/foo"},
},
nil,
},
{
"basic",
&Daemon{
Command: &exec.Cmd{Path: "/foo"},
ProxyID: "web",
process: &os.Process{Pid: 42},
},
map[string]interface{}{
"Pid": 42,
"CommandPath": "/foo",
"CommandArgs": []string(nil),
"CommandDir": "",
"CommandEnv": []string(nil),
"ProxyToken": "",
"ProxyID": "web",
},
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
actual := tc.Proxy.MarshalSnapshot()
require.Equal(t, tc.Expected, actual)
})
}
}
func TestDaemonUnmarshalSnapshot(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
uuid, err := uuid.GenerateUUID()
require.NoError(err)
cmd, destroy := helperProcess("start-stop", path)
defer destroy()
d := &Daemon{
Command: cmd,
ProxyToken: uuid,
Logger: testLogger,
}
defer d.Stop()
require.NoError(d.Start())
// Wait for the file to exist
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Snapshot
snap := d.MarshalSnapshot()
// Stop the original daemon but keep it alive
require.NoError(d.Close())
// Restore the second daemon
d2 := &Daemon{Logger: testLogger}
require.NoError(d2.UnmarshalSnapshot(snap))
// Verify the daemon is still running
_, err = os.Stat(path)
require.NoError(err)
// Stop the process
require.NoError(d2.Stop())
// File should no longer exist.
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return
}
// err might be nil here but that's okay
r.Fatalf("should not exist: %s", err)
})
}
func TestDaemonUnmarshalSnapshot_notRunning(t *testing.T) {
t.Parallel()
require := require.New(t)
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
uuid, err := uuid.GenerateUUID()
require.NoError(err)
cmd, destroy := helperProcess("start-stop", path)
defer destroy()
d := &Daemon{
Command: cmd,
ProxyToken: uuid,
Logger: testLogger,
}
defer d.Stop()
require.NoError(d.Start())
// Wait for the file to exist
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error: %s", err)
})
// Snapshot
snap := d.MarshalSnapshot()
// Stop the original daemon
require.NoError(d.Stop())
// Restore the second daemon
d2 := &Daemon{Logger: testLogger}
require.Error(d2.UnmarshalSnapshot(snap))
}

View File

@ -1,10 +0,0 @@
// +build !darwin,!linux,!windows
package proxyprocess
import "os"
// exitStatus for other platforms where we don't know how to extract it.
func exitStatus(ps *os.ProcessState) (int, bool) {
return 0, false
}

View File

@ -1,18 +0,0 @@
// +build darwin linux windows
package proxyprocess
import (
"os"
"syscall"
)
// exitStatus for platforms with syscall.WaitStatus which are listed
// at the top of this file in the build constraints.
func exitStatus(ps *os.ProcessState) (int, bool) {
if status, ok := ps.Sys().(syscall.WaitStatus); ok {
return status.ExitStatus(), true
}
return 0, false
}

View File

@ -1,519 +0,0 @@
package proxyprocess
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"sync"
"time"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-multierror"
)
const (
// ManagerCoalescePeriod and ManagerQuiescentPeriod relate to how
// notifications in updates from the local state are coalesced to prevent
// lots of churn in the manager.
//
// When the local state updates, the manager will wait for quiescence.
// For each update, the quiescence timer is reset. If the coalesce period
// is reached, the manager will update proxies regardless of the frequent
// changes. Then the whole cycle resets.
ManagerCoalescePeriod = 5 * time.Second
ManagerQuiescentPeriod = 500 * time.Millisecond
// ManagerSnapshotPeriod is the interval that snapshots are taken.
// The last snapshot state is preserved and, if it matches, a file isn't
// written, so it's safe for this to be reasonably frequent.
ManagerSnapshotPeriod = 1 * time.Second
)
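// For example (derived from the Run loop below): if local-state updates
// arrive every 100ms, no sync happens until ManagerQuiescentPeriod (500ms)
// passes without an update, but a sync is forced at most
// ManagerCoalescePeriod (5s) after the first update in a burst.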
// Manager starts, stops, snapshots, and restores managed proxies.
//
// The manager will not start or stop any processes until Start is called.
// Prior to this, any configuration, snapshot loading, etc. can be done.
// Even if a process is no longer running after loading the snapshot, it
// will not be restarted until Start is called.
//
// The Manager works by subscribing to change notifications on a local.State
// structure. Whenever a change is detected, the Manager syncs its internal
// state with the local.State and starts/stops any necessary proxies. The
// manager never holds a lock on local.State (except to read the proxies)
// and state updates may occur while the Manager is syncing. This is okay,
// since a change notification will be queued to trigger another sync.
//
// The change notifications from the local state are coalesced (see
// ManagerCoalescePeriod) so that frequent changes within the local state
// do not trigger dozens of proxy resyncs.
type Manager struct {
// State is the local state that is the source of truth for all
// configured managed proxies.
State *local.State
// Logger is the logger for information about manager behavior.
// Output for proxies will not go here generally but varies by proxy
// implementation type.
Logger *log.Logger
// DataDir is the path to the directory where data for proxies is
// written, including snapshots for any state changes in the manager.
// Within the data dir, files will be written in the following locations:
//
// * logs/ - log files named <service id>-std{out|err}.log
// * pids/ - pid files for daemons named <service id>.pid
// * snapshot.json - the state of the manager
//
DataDir string
// Extra environment variables to set for the proxies
ProxyEnv []string
// SnapshotPeriod is the duration between snapshots. This can be set
// relatively low to ensure accuracy, because if the new snapshot matches
// the last snapshot taken, no file will be written. Therefore, setting
// this low causes only slight CPU/memory usage but doesn't result in
// disk IO. If this isn't set, ManagerSnapshotPeriod will be the default.
//
// This only has an effect if snapshots are enabled (DataDir is set).
SnapshotPeriod time.Duration
// CoalescePeriod and QuiescencePeriod control the timers for coalescing
// updates from the local state. See the defaults at the top of this
// file for more documentation. These will be set to those defaults
// by NewManager.
CoalescePeriod time.Duration
QuiescentPeriod time.Duration
// AllowRoot configures whether proxies can be executed as root (EUID == 0).
// If this is false then the manager will run and proxies can be added
// and removed but none will be started and errors will be logged
// to the logger.
AllowRoot bool
// lock is held while reading/writing any internal state of the manager.
// cond is a condition variable on lock that is broadcasted for runState
// changes.
lock *sync.Mutex
cond *sync.Cond
// runState is the current state of the manager. To read this the
// lock must be held. The condition variable cond can be waited on
// for changes to this value.
runState managerRunState
// lastSnapshot stores a pointer to the last snapshot that successfully
// wrote to disk. This is used for dup detection to prevent rewriting
// the same snapshot multiple times. snapshots should never be that
// large so keeping it in-memory should be cheap even for thousands of
// proxies (unlikely scenario).
lastSnapshot *snapshot
proxies map[string]Proxy
}
// NewManager initializes a Manager. After initialization, the exported
// fields should be configured as desired. To start the Manager, execute
// Run in a goroutine.
func NewManager() *Manager {
var lock sync.Mutex
return &Manager{
Logger: defaultLogger,
SnapshotPeriod: ManagerSnapshotPeriod,
CoalescePeriod: ManagerCoalescePeriod,
QuiescentPeriod: ManagerQuiescentPeriod,
lock: &lock,
cond: sync.NewCond(&lock),
proxies: make(map[string]Proxy),
}
}
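// A minimal usage sketch (assuming a configured *local.State in state and a
// data_dir path in dataDir):
//
//	m := NewManager()
//	m.State = state
//	m.DataDir = filepath.Join(dataDir, "proxy")
//	go m.Run()
//	defer m.Kill()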
// defaultLogger is the default logger for NewManager so it is never nil
var defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// managerRunState is the state of the Manager.
//
// This is a basic state machine with the following transitions:
//
// * idle => running, stopped
// * running => stopping, stopped
// * stopping => stopped
// * stopped => <>
//
type managerRunState uint8
const (
managerStateIdle managerRunState = iota
managerStateRunning
managerStateStopping
managerStateStopped
)
// Close stops the manager. Managed processes are NOT stopped.
func (m *Manager) Close() error {
m.lock.Lock()
defer m.lock.Unlock()
return m.stop(func(p Proxy) error {
return p.Close()
})
}
// Kill will Close the manager and Kill all proxies that were being managed.
// Only ONE of Kill or Close must be called. If Close has been called already
// then this will have no effect.
func (m *Manager) Kill() error {
m.lock.Lock()
defer m.lock.Unlock()
return m.stop(func(p Proxy) error {
return p.Stop()
})
}
// stop stops the run loop and cleans up all the proxies by calling
// the given cleaner. If the cleaner returns an error the proxy won't be
// removed from the map.
//
// The lock must be held while this is called.
func (m *Manager) stop(cleaner func(Proxy) error) error {
for {
// Special case state that exits the for loop
if m.runState == managerStateStopped {
break
}
switch m.runState {
case managerStateIdle:
// Idle so just set it to stopped and return. We notify
// the condition variable in case others are waiting.
m.runState = managerStateStopped
m.cond.Broadcast()
return nil
case managerStateRunning:
// Set the state to stopping and broadcast to all waiters,
// since Run is sitting on cond.Wait.
m.runState = managerStateStopping
m.cond.Broadcast()
m.cond.Wait() // Wait on the stopping event
case managerStateStopping:
// Still stopping, wait...
m.cond.Wait()
}
}
// Clean up all the proxies
var errs error
for id, proxy := range m.proxies {
// Use a distinct name for the accumulator so the per-proxy err doesn't
// shadow it and silently drop the accumulated errors.
if err := cleaner(proxy); err != nil {
errs = multierror.Append(
errs, fmt.Errorf("failed to stop proxy %q: %s", id, err))
continue
}
// Remove it since it is already stopped successfully
delete(m.proxies, id)
}
return errs
}
// Run syncs with the local state and supervises existing proxies.
//
// This blocks and should be run in a goroutine. If another Run is already
// executing, this will do nothing and return.
func (m *Manager) Run() {
m.lock.Lock()
if m.runState != managerStateIdle {
m.lock.Unlock()
return
}
// Set the state to running
m.runState = managerStateRunning
m.lock.Unlock()
// Start a goroutine that just waits for a stop request
stopCh := make(chan struct{})
go func() {
defer close(stopCh)
m.lock.Lock()
defer m.lock.Unlock()
// We wait for anything not running, just so we're more resilient
// in the face of state machine issues. Basically any state change
// will cause us to quit.
for m.runState == managerStateRunning {
m.cond.Wait()
}
}()
// When we exit, we set the state to stopped and broadcast to any
// waiting Close functions that they can return.
defer func() {
m.lock.Lock()
m.runState = managerStateStopped
m.cond.Broadcast()
m.lock.Unlock()
}()
// Register for proxy catalog change notifications
notifyCh := make(chan struct{}, 1)
m.State.NotifyProxy(notifyCh)
defer m.State.StopNotifyProxy(notifyCh)
// Start the timer for snapshots. We don't use a ticker because disk
// IO can be slow and we don't want overlapping notifications. So we only
// reset the timer once the snapshot is complete rather than continuously.
snapshotTimer := time.NewTimer(m.SnapshotPeriod)
defer snapshotTimer.Stop()
m.Logger.Println("[DEBUG] agent/proxy: managed Connect proxy manager started")
SYNC:
for {
// Sync first, before waiting on further notifications so that
// we can start with a known-current state.
m.sync()
// Note for these variables we don't use a time.Timer because both
// periods are relatively short anyway so they end up being eligible
// for GC very quickly, so overhead is not a concern.
var quiescent, quantum <-chan time.Time
// Start a loop waiting for events from the local state store. This
// loops rather than just `select` so we can coalesce many state
// updates over a period of time.
for {
select {
case <-notifyCh:
// If this is our first notification since the last sync,
// reset the quantum timer which is the max time we'll wait.
if quantum == nil {
quantum = time.After(m.CoalescePeriod)
}
// Always reset the quiescent timer
quiescent = time.After(m.QuiescentPeriod)
case <-quantum:
continue SYNC
case <-quiescent:
continue SYNC
case <-snapshotTimer.C:
// Perform a snapshot
if path := m.SnapshotPath(); path != "" {
if err := m.snapshot(path, true); err != nil {
m.Logger.Printf("[WARN] agent/proxy: failed to snapshot state: %s", err)
}
}
// Reset
snapshotTimer.Reset(m.SnapshotPeriod)
case <-stopCh:
// Stop immediately, no cleanup
m.Logger.Println("[DEBUG] agent/proxy: Stopping managed Connect proxy manager")
return
}
}
}
}
// sync syncs data with the local state store to update the current manager
// state and start/stop necessary proxies.
func (m *Manager) sync() {
m.lock.Lock()
defer m.lock.Unlock()
// If we don't allow root and we're root, then log a high sev message.
if !m.AllowRoot && isRoot() {
m.Logger.Println("[WARN] agent/proxy: running as root, will not start managed proxies")
return
}
// Get the current set of proxies
state := m.State.Proxies()
// Go through our existing proxies that we're currently managing to
// determine if they're still in the state or not. If they're in the
// state, we need to diff to determine if we're starting a new proxy.
// If they're not in the state, then we need to stop the proxy since it
// is now orphaned.
for id, proxy := range m.proxies {
// Get the proxy.
stateProxy, ok := state[id]
if ok {
// Remove the proxy from the state so we don't start it new.
delete(state, id)
// Make the proxy so we can compare. This does not start it.
proxy2, err := m.newProxy(stateProxy)
if err != nil {
m.Logger.Printf("[ERROR] agent/proxy: failed to initialize proxy for %q: %s", id, err)
continue
}
// If the proxies are equal, then do nothing
if proxy.Equal(proxy2) {
continue
}
// Proxies are not equal, so we should stop it. We add it
// back to the state here (unlikely case) so the loop below starts
// the new one.
state[id] = stateProxy
// Continue out of `if` as if proxy didn't exist so we stop it
}
// Proxy is deregistered. Remove it from our map and stop it
delete(m.proxies, id)
if err := proxy.Stop(); err != nil {
m.Logger.Printf("[ERROR] agent/proxy: failed to stop deregistered proxy for %q: %s", id, err)
}
}
// Remaining entries in state are new proxies. Start them!
for id, stateProxy := range state {
proxy, err := m.newProxy(stateProxy)
if err != nil {
m.Logger.Printf("[ERROR] agent/proxy: failed to initialize proxy for %q: %s", id, err)
continue
}
if err := proxy.Start(); err != nil {
m.Logger.Printf("[ERROR] agent/proxy: failed to start proxy for %q: %s", id, err)
continue
}
m.proxies[id] = proxy
}
}
// newProxy creates the proper Proxy implementation for the configured
// local managed proxy.
func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) {
// Defensive because the alternative is to panic which is not desired
if mp == nil || mp.Proxy == nil {
return nil, fmt.Errorf("internal error: nil *local.ManagedProxy or Proxy field")
}
p := mp.Proxy
// We reuse the service ID a few times
id := p.ProxyService.ID
// Create the Proxy. We could just as easily switch on p.ExecMode
// but I wanted there to be only one location where ExecMode => Proxy is
// decided, since that lowers the chance that it is wrong.
proxy, err := m.newProxyFromMode(p.ExecMode, id)
if err != nil {
return nil, err
}
// Depending on the proxy type we configure the rest from our ManagedProxy
switch proxy := proxy.(type) {
case *Daemon:
command := p.Command
// This should never happen since validation should happen upstream
// but verify it because the alternative is to panic below.
if len(command) == 0 {
return nil, fmt.Errorf("daemon mode managed proxy requires command")
}
// Build the command to execute.
var cmd exec.Cmd
cmd.Path = command[0]
cmd.Args = command // index 0 is the path, preserved since Args[0] should be the binary
if err := m.configureLogDir(id, &cmd); err != nil {
return nil, fmt.Errorf("error configuring proxy logs: %s", err)
}
// Pass in the environmental variables for the proxy process
cmd.Env = append(m.ProxyEnv, os.Environ()...)
// Build the daemon structure
proxy.Command = &cmd
proxy.ProxyID = id
proxy.ProxyToken = mp.ProxyToken
return proxy, nil
default:
return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode)
}
}
// newProxyFromMode just initializes the proxy structure from only the mode
// and the service ID. This is a shared method between newProxy and Restore
// so that we only have one location where we turn ExecMode into a Proxy.
func (m *Manager) newProxyFromMode(mode structs.ProxyExecMode, id string) (Proxy, error) {
switch mode {
case structs.ProxyExecModeDaemon:
return &Daemon{
Logger: m.Logger,
PidPath: pidPath(filepath.Join(m.DataDir, "pids"), id),
}, nil
default:
return nil, fmt.Errorf("unsupported managed proxy type: %q", mode)
}
}
// configureLogDir sets up the file descriptors to stdout/stderr so that
// they log to the proper file path for the given service ID.
func (m *Manager) configureLogDir(id string, cmd *exec.Cmd) error {
// Create the log directory
logDir := ""
if m.DataDir != "" {
logDir = filepath.Join(m.DataDir, "logs")
if err := os.MkdirAll(logDir, 0700); err != nil {
return err
}
}
// Configure the stdout, stderr paths
stdoutPath := logPath(logDir, id, "stdout")
stderrPath := logPath(logDir, id, "stderr")
// Open the files. We want to append to each. We expect these files
// to be rotated by some external process.
stdoutF, err := os.OpenFile(stdoutPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return fmt.Errorf("error creating stdout file: %s", err)
}
stderrF, err := os.OpenFile(stderrPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
// Don't forget to close stdoutF which successfully opened
stdoutF.Close()
return fmt.Errorf("error creating stderr file: %s", err)
}
cmd.Stdout = stdoutF
cmd.Stderr = stderrF
return nil
}
// logPath is a helper to return the path to the log file for the given
// directory, service ID, and stream type (stdout or stderr).
func logPath(dir, id, stream string) string {
return filepath.Join(dir, fmt.Sprintf("%s-%s.log", id, stream))
}
// pidPath is a helper to return the path to the pid file for the given
// directory and service ID.
func pidPath(dir, id string) string {
// If no directory is given we do not write a pid
if dir == "" {
return ""
}
return filepath.Join(dir, fmt.Sprintf("%s.pid", id))
}

View File

@ -1,585 +0,0 @@
package proxyprocess
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
)
func TestManagerClose_noRun(t *testing.T) {
t.Parallel()
// Really we're testing that it doesn't deadlock here.
m, closer := testManager(t)
defer closer()
require.NoError(t, m.Close())
// Close again for sanity
require.NoError(t, m.Close())
}
// Test that Run performs an initial sync (if local.State is already set)
// rather than waiting for a notification from the local state.
func TestManagerRun_initialSync(t *testing.T) {
t.Parallel()
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Add the proxy before we start the manager to verify initial sync
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("restart", path)
defer destroy()
testStateProxy(t, state, "web", cmd)
// Start the manager
go m.Run()
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
}
func TestManagerRun_syncNew(t *testing.T) {
t.Parallel()
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Start the manager
go m.Run()
// Sleep a bit; this just gives Run a chance to already be running.
// It's not a big deal if this sleep doesn't happen (slow CI).
time.Sleep(100 * time.Millisecond)
// Add the first proxy
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("restart", path)
defer destroy()
testStateProxy(t, state, "web", cmd)
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
// Add another proxy
path = path + "2"
cmd, destroy = helperProcess("restart", path)
defer destroy()
testStateProxy(t, state, "db", cmd)
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
}
func TestManagerRun_syncDelete(t *testing.T) {
t.Parallel()
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Start the manager
go m.Run()
// Add the first proxy
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("restart", path)
defer destroy()
id := testStateProxy(t, state, "web", cmd)
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
// Remove the proxy
_, err := state.RemoveProxy(id)
require.NoError(t, err)
// File should disappear as process is killed
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
r.Fatalf("path exists")
}
})
}
func TestManagerRun_syncUpdate(t *testing.T) {
t.Parallel()
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Start the manager
go m.Run()
// Add the first proxy
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("restart", path)
defer destroy()
testStateProxy(t, state, "web", cmd)
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
// Update the proxy with a new path
oldPath := path
path = path + "2"
cmd, destroy = helperProcess("restart", path)
defer destroy()
testStateProxy(t, state, "web", cmd)
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
// Old path should be gone
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(oldPath)
if err == nil {
r.Fatalf("old path exists")
}
})
}
func TestManagerRun_daemonLogs(t *testing.T) {
t.Parallel()
require := require.New(t)
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Configure a log dir so that we can read the logs
logDir := filepath.Join(m.DataDir, "logs")
// Create the service and calculate the log paths
path := filepath.Join(m.DataDir, "notify")
cmd, destroy := helperProcess("output", path)
defer destroy()
id := testStateProxy(t, state, "web", cmd)
stdoutPath := logPath(logDir, id, "stdout")
stderrPath := logPath(logDir, id, "stderr")
// Start the manager
go m.Run()
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
if _, err := os.Stat(path); err != nil {
r.Fatalf("error waiting for stdout path: %s", err)
}
})
expectedOut := "hello stdout\n"
actual, err := ioutil.ReadFile(stdoutPath)
require.NoError(err)
require.Equal([]byte(expectedOut), actual)
expectedErr := "hello stderr\n"
actual, err = ioutil.ReadFile(stderrPath)
require.NoError(err)
require.Equal([]byte(expectedErr), actual)
}
func TestManagerRun_daemonPid(t *testing.T) {
t.Parallel()
require := require.New(t)
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Compute the pid dir so that we can find the pid file
pidDir := filepath.Join(m.DataDir, "pids")
// Create the service and calculate the pid path
path := filepath.Join(m.DataDir, "notify")
cmd, destroy := helperProcess("output", path)
defer destroy()
id := testStateProxy(t, state, "web", cmd)
pidPath := pidPath(pidDir, id)
// Start the manager
go m.Run()
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
if _, err := os.Stat(path); err != nil {
r.Fatalf("error waiting for stdout path: %s", err)
}
})
// Verify the pid file is not empty
pidRaw, err := ioutil.ReadFile(pidPath)
require.NoError(err)
require.NotEmpty(pidRaw)
}
// Test to check if the parent and the child processes
// have the same environmental variables
func TestManagerPassesEnvironment(t *testing.T) {
t.Parallel()
require := require.New(t)
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Add Proxy for the test
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "env-variables")
cmd, destroy := helperProcess("environ", path)
defer destroy()
testStateProxy(t, state, "environTest", cmd)
// Run the manager
go m.Run()
// Get the environmental variables from the OS
var fileContent []byte
var err error
var data []byte
envData := os.Environ()
sort.Strings(envData)
for _, envVariable := range envData {
if strings.HasPrefix(envVariable, "CONSUL") || strings.HasPrefix(envVariable, "CONNECT") {
continue
}
data = append(data, envVariable...)
data = append(data, "\n"...)
}
// Check if the file written to from the spawned process
// has the necessary environmental variable data
retry.Run(t, func(r *retry.R) {
if fileContent, err = ioutil.ReadFile(path); err != nil {
r.Fatalf("No file ya dummy")
}
})
require.Equal(data, fileContent)
}
// Test to check that extra ProxyEnv entries configured on the manager are
// passed through to the child process's environment
func TestManagerPassesProxyEnv(t *testing.T) {
t.Parallel()
require := require.New(t)
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
penv := make([]string, 0, 2)
penv = append(penv, "HTTP_ADDR=127.0.0.1:8500")
penv = append(penv, "HTTP_SSL=false")
m.ProxyEnv = penv
// Add Proxy for the test
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "env-variables")
cmd, destroy := helperProcess("environ", path)
defer destroy()
testStateProxy(t, state, "environTest", cmd)
// Run the manager
go m.Run()
// Get the environmental variables from the OS
var fileContent []byte
var err error
var data []byte
envData := os.Environ()
envData = append(envData, "HTTP_ADDR=127.0.0.1:8500")
envData = append(envData, "HTTP_SSL=false")
sort.Strings(envData)
for _, envVariable := range envData {
if strings.HasPrefix(envVariable, "CONSUL") || strings.HasPrefix(envVariable, "CONNECT") {
continue
}
data = append(data, envVariable...)
data = append(data, "\n"...)
}
// Check if the file written to from the spawned process
// has the necessary environmental variable data
retry.Run(t, func(r *retry.R) {
if fileContent, err = ioutil.ReadFile(path); err != nil {
r.Fatalf("No file ya dummy")
}
})
require.Equal(data, fileContent)
}
// Test that Snapshot/Restore works.
func TestManagerRun_snapshotRestore(t *testing.T) {
t.Parallel()
require := require.New(t)
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Add the proxy
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("start-stop", path)
defer destroy()
testStateProxy(t, state, "web", cmd)
// Set a low snapshot period so we get a snapshot
m.SnapshotPeriod = 10 * time.Millisecond
// Start the manager
go m.Run()
// We should see the path appear shortly
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
// Wait for the snapshot
snapPath := m.SnapshotPath()
retry.Run(t, func(r *retry.R) {
raw, err := ioutil.ReadFile(snapPath)
if err != nil {
r.Fatalf("error waiting for path: %s", err)
}
if len(raw) < 30 {
r.Fatalf("snapshot too small")
}
})
// Stop the sync
require.NoError(m.Close())
// File should still exist
_, err := os.Stat(path)
require.NoError(err)
// Restore a manager from a snapshot
m2, closer := testManager(t)
m2.State = state
defer closer()
defer m2.Kill()
require.NoError(m2.Restore(snapPath))
// Start
go m2.Run()
// Add a second proxy so that we can determine when we're up
// and running.
path2 := filepath.Join(td, "file2")
cmd, destroy = helperProcess("start-stop", path2)
defer destroy()
testStateProxy(t, state, "db", cmd)
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path2)
if err == nil {
return
}
r.Fatalf("error waiting for path: %s", err)
})
// Kill m2, which should kill our managed process
require.NoError(m2.Kill())
// File should no longer exist
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err != nil {
return
}
r.Fatalf("file still exists: %s", path)
})
}
// Manager should not run any proxies if we're running as root. Tests
// stub the value.
func TestManagerRun_rootDisallow(t *testing.T) {
// Pretend we are root
defer testSetRootValue(true)()
state := local.TestState(t)
m, closer := testManager(t)
defer closer()
m.State = state
defer m.Kill()
// Add the proxy before we start the manager to verify initial sync
td, closer := testTempDir(t)
defer closer()
path := filepath.Join(td, "file")
cmd, destroy := helperProcess("restart", path)
defer destroy()
testStateProxy(t, state, "web", cmd)
// Start the manager
go m.Run()
// Sleep a bit to give the manager a chance to (incorrectly) start the proxy
time.Sleep(100 * time.Millisecond)
// The path should never appear since the manager refuses to run proxies as root
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err != nil {
return
}
r.Fatalf("path exists")
})
}
func testManager(t *testing.T) (*Manager, func()) {
m := NewManager()
// Setup a default state
m.State = local.TestState(t)
// Set these periods low to speed up tests
m.CoalescePeriod = 1 * time.Millisecond
m.QuiescentPeriod = 1 * time.Millisecond
// Setup a temporary directory for logs
td, closer := testTempDir(t)
m.DataDir = td
return m, func() { closer() }
}
// testStateProxy registers a proxy with the given local state and the command
// (expected to be from the helperProcess function call). It returns the
// ID for deregistration.
func testStateProxy(t *testing.T, state *local.State, service string, cmd *exec.Cmd) string {
// *exec.Cmd must manually set args[0] to the binary. We automatically
// set this when constructing the command for the proxy, so we must strip
// the zero index. We do this unconditionally (anytime len is > 0) because
// index zero should ALWAYS be the binary.
if len(cmd.Args) > 0 {
cmd.Args = cmd.Args[1:]
}
command := []string{cmd.Path}
command = append(command, cmd.Args...)
require.NoError(t, state.AddService(&structs.NodeService{
Service: service,
}, "token"))
p, err := state.AddProxy(&structs.ConnectManagedProxy{
ExecMode: structs.ProxyExecModeDaemon,
Command: command,
TargetServiceID: service,
}, "token", "")
require.NoError(t, err)
return p.Proxy.ProxyService.ID
}

View File

@ -1,11 +0,0 @@
package proxyprocess
// Noop implements Proxy and does nothing.
type Noop struct{}
func (p *Noop) Start() error { return nil }
func (p *Noop) Stop() error { return nil }
func (p *Noop) Close() error { return nil }
func (p *Noop) Equal(Proxy) bool { return true }
func (p *Noop) MarshalSnapshot() map[string]interface{} { return nil }
func (p *Noop) UnmarshalSnapshot(map[string]interface{}) error { return nil }

View File

@ -1,9 +0,0 @@
package proxyprocess
import (
"testing"
)
func TestNoop_impl(t *testing.T) {
var _ Proxy = new(Noop)
}

View File

@ -1,14 +0,0 @@
package proxyprocess
import (
"strings"
)
// isProcessAlreadyFinishedErr does a janky comparison with an error string
// defined in os/exec_unix.go and os/exec_windows.go which we encounter due to
// races with polling the external process. These races cause tests to fail
// since Stop sometimes returns an error, so we should notice if this string
// stops matching the error in a future Go version.
func isProcessAlreadyFinishedErr(err error) bool {
return strings.Contains(err.Error(), "os: process already finished")
}

View File

@ -1,38 +0,0 @@
// +build !windows
package proxyprocess
import (
"fmt"
"os"
"os/exec"
"syscall"
)
// findProcess for non-Windows. Note that this very likely doesn't
// work for all non-Windows platforms Go supports and we should expand
// support as we experience it.
func findProcess(pid int) (*os.Process, error) {
// FindProcess never fails on unix-like systems.
p, err := os.FindProcess(pid)
if err != nil {
return nil, err
}
// On Unix-like systems, we can verify a process is alive by sending
// a 0 signal. This will do nothing to the process but will still
// return errors if the process is gone.
err = p.Signal(syscall.Signal(0))
if err == nil {
return p, nil
}
return nil, fmt.Errorf("process %d is dead or running as another user", pid)
}
// configureDaemon is called prior to Start to allow system-specific setup.
func configureDaemon(cmd *exec.Cmd) {
// Start it in a new session (and hence process group) so that killing the
// agent (even with Ctrl-C) won't kill the proxy.
cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
}

View File

@ -1,19 +0,0 @@
// +build windows
package proxyprocess
import (
"os"
"os/exec"
)
func findProcess(pid int) (*os.Process, error) {
// On Windows, os.FindProcess will error if the process is not alive,
// so we don't have to do any further checking. If it returns a non-nil
// process, the process appears to be alive.
return os.FindProcess(pid)
}
func configureDaemon(cmd *exec.Cmd) {
// Do nothing
}

View File

@ -1,101 +0,0 @@
// Package proxy contains logic for agent interaction with proxies,
// primarily "managed" proxies. Managed proxies are proxy processes for
// Connect-compatible endpoints that Consul owns and controls the lifecycle
// for.
//
// This package does not contain the built-in proxy for Connect. The source
// for that is available in the "connect/proxy" package.
package proxyprocess
import (
"github.com/hashicorp/consul/agent/structs"
)
const (
// EnvProxyID is the name of the environment variable that is set for
// managed proxies containing the proxy service ID. This is required along
// with the token to make API requests related to the proxy.
EnvProxyID = "CONNECT_PROXY_ID"
// EnvProxyToken is the name of the environment variable that is passed
// to managed proxies containing the proxy token.
EnvProxyToken = "CONNECT_PROXY_TOKEN"
// EnvSidecarFor is the name of the environment variable that is set for
// sidecar proxies containing the service ID of their target on the local
// agent
EnvSidecarFor = "CONNECT_SIDECAR_FOR"
)
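// Illustrative sketch (not part of the original file): how a spawned proxy
// process consumed these variables to identify itself to the local agent.
// The helper name readProxyEnv is hypothetical; it assumes an "os" import.
//
//	func readProxyEnv() (proxyID, token, sidecarFor string) {
//		proxyID = os.Getenv(EnvProxyID)       // service ID of the proxy itself
//		token = os.Getenv(EnvProxyToken)      // ACL token scoped to this proxy
//		sidecarFor = os.Getenv(EnvSidecarFor) // target service ID for sidecars
//		return
//	}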
// Proxy is the interface implemented by all types of managed proxies.
//
// Calls to all the functions on this interface must be concurrency safe.
// Please read the documentation carefully on top of each function for expected
// behavior.
//
// Whenever a new proxy type is implemented, please also update proxyExecMode
// and newProxyFromMode and newProxy to support the new proxy.
type Proxy interface {
// Start starts the proxy. If an error is returned then the managed
// proxy registration is rejected. Therefore, this should only fail if
// the configuration of the proxy itself is irrecoverable, and should
// retry starting for other failures.
//
// Starting an already-started proxy should not return an error.
Start() error
// Stop stops the proxy and disallows it from ever being started again.
// This should also clean up any resources used by this Proxy.
//
// If the proxy is not started yet, this should not return an error, but
// it should disallow Start from working again. If the proxy is already
// stopped, this should not return an error.
Stop() error
// Close should clean up any resources associated with this proxy but
// keep it running in the background. Only one of Close or Stop can be
// called.
Close() error
// Equal returns true if the argument is equal to the proxy being called.
// This is called by the manager to determine if a change in configuration
// results in a proxy that needs to be restarted or not. If Equal returns
// false, then the manager will stop the old proxy and start the new one.
// If Equal returns true, the old proxy will remain running and the new
// one will be ignored.
Equal(Proxy) bool
// MarshalSnapshot returns the state that will be stored in a snapshot
// so that Consul can recover the proxy process after a restart. The
// result should only contain primitive values and containers (lists/maps).
//
// MarshalSnapshot does NOT need to store the following fields, since they
// are part of the manager snapshot and will be automatically restored
// for any proxies: proxy ID.
//
// UnmarshalSnapshot is called to restore the receiving Proxy from its
// marshaled state. If UnmarshalSnapshot returns an error, the snapshot
// is ignored and the marshaled snapshot will be lost. The manager will
// log.
//
// This should save/restore enough state to be able to regain management
// of a proxy process as well as to perform the Equal method above. The
// Equal method will be called when a local state sync happens to determine
// if the recovered process should be restarted or not.
MarshalSnapshot() map[string]interface{}
UnmarshalSnapshot(map[string]interface{}) error
}
// proxyExecMode returns the ProxyExecMode for a Proxy instance.
func proxyExecMode(p Proxy) structs.ProxyExecMode {
switch p.(type) {
case *Daemon:
return structs.ProxyExecModeDaemon
case *Noop:
return structs.ProxyExecModeTest
default:
return structs.ProxyExecModeUnspecified
}
}
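// For illustration (a sketch, not the original implementation): the inverse
// dispatch that the Manager's newProxyFromMode performed when restoring
// proxies from a snapshot. The standalone name and body here are hypothetical;
// it assumes a "fmt" import.
//
//	func newProxyFromModeSketch(mode structs.ProxyExecMode) (Proxy, error) {
//		switch mode {
//		case structs.ProxyExecModeDaemon:
//			return &Daemon{}, nil // the real code also wired command, logger, etc.
//		case structs.ProxyExecModeTest:
//			return &Noop{}, nil
//		default:
//			return nil, fmt.Errorf("unsupported exec mode: %s", mode)
//		}
//	}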

View File

@ -1,282 +0,0 @@
package proxyprocess
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"sort"
"strconv"
"strings"
"syscall"
"testing"
"time"
)
// testLogger is a logger that can be used by tests that require a
// *log.Logger instance.
var testLogger = log.New(os.Stderr, "logger: ", log.LstdFlags)
// testTempDir returns a temporary directory and a cleanup function.
func testTempDir(t *testing.T) (string, func()) {
t.Helper()
td, err := ioutil.TempDir("", "test-agent-proxy")
if err != nil {
t.Fatalf("err: %s", err)
}
return td, func() {
if err := os.RemoveAll(td); err != nil {
t.Fatalf("err: %s", err)
}
}
}
// helperProcessSentinel is a sentinel value that is put as the first
// argument following "--" and is used to determine if TestHelperProcess
// should run.
const helperProcessSentinel = "WANT_HELPER_PROCESS"
// helperProcess returns an *exec.Cmd that can be used to execute the
// TestHelperProcess function below. This can be used to test multi-process
// interactions.
func helperProcess(s ...string) (*exec.Cmd, func()) {
cs := []string{"-test.run=TestHelperProcess", "--", helperProcessSentinel}
cs = append(cs, s...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
destroy := func() {
if p := cmd.Process; p != nil {
p.Kill()
}
}
return cmd, destroy
}
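// Illustrative usage (the pattern used throughout these tests): spawn the
// helper in a named mode, then register it as a managed proxy command.
//
//	cmd, destroy := helperProcess("start-stop", filepath.Join(td, "file"))
//	defer destroy()
//	testStateProxy(t, state, "web", cmd)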
// This is not a real test. This is just a helper process kicked off by tests
// using the helperProcess helper function.
func TestHelperProcess(t *testing.T) {
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
args = args[1:]
break
}
args = args[1:]
}
if len(args) == 0 || args[0] != helperProcessSentinel {
return
}
defer os.Exit(0)
args = args[1:] // strip sentinel value
cmd, args := args[0], args[1:]
switch cmd {
// While running, this creates a file in the given directory (args[0])
// and deletes it only when it is stopped.
case "start-stop":
limitProcessLifetime(2 * time.Minute)
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(ch)
path := args[0]
var data []byte
data = append(data, []byte(os.Getenv(EnvProxyID))...)
data = append(data, ':')
data = append(data, []byte(os.Getenv(EnvProxyToken))...)
if err := ioutil.WriteFile(path, data, 0644); err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(path)
<-ch
// Restart writes to a file and keeps running while that file still
// exists. When that file is removed, this process exits. This can be
// used to test restarting.
case "restart":
limitProcessLifetime(2 * time.Minute)
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
defer signal.Stop(ch)
// Write the file
path := args[0]
if err := ioutil.WriteFile(path, []byte("hello"), 0644); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
os.Exit(1)
}
// While the file still exists, do nothing. When the file no longer
// exists, we exit.
for {
time.Sleep(25 * time.Millisecond)
if _, err := os.Stat(path); os.IsNotExist(err) {
break
}
select {
case <-ch:
// We received an interrupt, clean exit. Note: break here only exits
// the select; removing the file causes the enclosing loop's stat
// check to terminate on the next iteration.
os.Remove(path)
break
default:
}
}
case "stop-kill":
limitProcessLifetime(2 * time.Minute)
// Set up signal listeners so the interrupt is ignored and the process must be killed
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
defer signal.Stop(ch)
path := args[0]
data := []byte(os.Getenv(EnvProxyToken))
for {
if err := ioutil.WriteFile(path, data, 0644); err != nil {
t.Fatalf("err: %s", err)
}
time.Sleep(25 * time.Millisecond)
}
// Check if the external process can access the environment variables
case "environ":
limitProcessLifetime(2 * time.Minute)
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt)
defer signal.Stop(stop)
// Get the path for the file to be written to
path := args[0]
var data []byte
// Get the environment variables
envData := os.Environ()
// Sort the env data for easier comparison
sort.Strings(envData)
for _, envVariable := range envData {
if strings.HasPrefix(envVariable, "CONSUL") || strings.HasPrefix(envVariable, "CONNECT") {
continue
}
data = append(data, envVariable...)
data = append(data, "\n"...)
}
if err := ioutil.WriteFile(path, data, 0644); err != nil {
t.Fatalf("[Error] File write failed : %s", err)
}
// Clean up after we receive the signal to exit
defer os.Remove(path)
<-stop
case "output":
limitProcessLifetime(2 * time.Minute)
fmt.Fprintf(os.Stdout, "hello stdout\n")
fmt.Fprintf(os.Stderr, "hello stderr\n")
// Sync to be sure it is written out of buffers
os.Stdout.Sync()
os.Stderr.Sync()
// Output a file to signal we've written to stdout/err
path := args[0]
if err := ioutil.WriteFile(path, []byte("hello"), 0644); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
os.Exit(1)
}
<-make(chan struct{})
// Parent runs the given process in a Daemon and then sleeps until the test
// code kills it. It exists to test that the Daemon-managed child process
// survives its parent exiting, which we can't test directly without exiting
// the test process, so we need an extra level of indirection. The test code
// using this must pass a file path as the first argument for the child
// process's PID to be written to, and then must take care to clean up that PID
// later or the child will be left running forever.
//
// If the PID file already exists, it will "adopt" the child rather than
// launch a new one.
case "parent":
limitProcessLifetime(2 * time.Minute)
// We will write the PID for the child to the file in the first argument
// then pass rest of args through to command.
pidFile := args[0]
cmd, destroyChild := helperProcess(args[1:]...)
defer destroyChild()
d := &Daemon{
Command: cmd,
Logger: testLogger,
PidPath: pidFile,
}
_, err := os.Stat(pidFile)
if err == nil {
// pidFile exists, read it and "adopt" the process
bs, err := ioutil.ReadFile(pidFile)
if err != nil {
log.Printf("Error: %s", err)
os.Exit(1)
}
pid, err := strconv.Atoi(string(bs))
if err != nil {
log.Printf("Error: %s", err)
os.Exit(1)
}
// Make a fake snapshot to load
snapshot := map[string]interface{}{
"Pid": pid,
"CommandPath": d.Command.Path,
"CommandArgs": d.Command.Args,
"CommandDir": d.Command.Dir,
"CommandEnv": d.Command.Env,
"ProxyToken": "",
}
d.UnmarshalSnapshot(snapshot)
}
if err := d.Start(); err != nil {
log.Printf("Error: %s", err)
os.Exit(1)
}
log.Println("Started child")
// Wait "forever" (calling test chooses when we exit with signal/Wait to
// minimize coordination).
for {
time.Sleep(time.Hour)
}
default:
fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd)
os.Exit(2)
}
}
// limitProcessLifetime installs a background goroutine that self-exits after
// the specified duration elapses to prevent leaking processes from tests that
// may spawn them.
func limitProcessLifetime(dur time.Duration) {
go time.AfterFunc(dur, func() {
os.Exit(99)
})
}

View File

@ -1,24 +0,0 @@
package proxyprocess
import (
"os"
)
// isRoot returns true if the process is executing as root.
func isRoot() bool {
if testRootValue != nil {
return *testRootValue
}
return os.Geteuid() == 0
}
// testSetRootValue is a test helper for setting the root value.
func testSetRootValue(v bool) func() {
testRootValue = &v
return func() { testRootValue = nil }
}
// testRootValue should be set to a non-nil value to return it as a stub
// from isRoot. This should only be used in tests.
var testRootValue *bool
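// Illustrative usage (as in TestManagerRun_rootDisallow above): the returned
// func restores the stub, so a single deferred call covers the whole test.
//
//	defer testSetRootValue(true)() // pretend to be root for this test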

View File

@ -1,171 +0,0 @@
package proxyprocess
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/file"
)
// snapshot is the structure of the snapshot file. This is unexported because
// we don't want this being a public API.
//
// The snapshot doesn't contain any configuration for the manager. We only
// want to restore the proxies that we're managing, and we use the config
// set at runtime to sync and reconcile what proxies we should start,
// restart, stop, or have already running.
type snapshot struct {
// Version is the version of the snapshot format and can be used
// to safely update the format in the future if necessary.
Version int
// Proxies are the set of proxies that the manager has.
Proxies map[string]snapshotProxy
}
// snapshotProxy represents a single proxy.
type snapshotProxy struct {
// Mode corresponds to the type of proxy running.
Mode structs.ProxyExecMode
// Config is an opaque mapping of primitive values that the proxy
// implementation uses to restore state.
Config map[string]interface{}
}
// snapshotVersion is the current version to encode within the snapshot.
const snapshotVersion = 1
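// For illustration (values are hypothetical): a version-1 snapshot for a
// single daemon proxy serializes roughly as
//
//	{
//	  "Version": 1,
//	  "Proxies": {
//	    "web-proxy": {
//	      "Mode": 1,
//	      "Config": {"Pid": 12345, "CommandPath": "/usr/local/bin/consul"}
//	    }
//	  }
//	}
//
// where Mode 1 is structs.ProxyExecModeDaemon and Config is whatever the
// proxy's MarshalSnapshot returned.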
// SnapshotPath returns the default snapshot path for this manager. This
// will return empty if DataDir is not set. This file may not exist yet.
func (m *Manager) SnapshotPath() string {
if m.DataDir == "" {
return ""
}
return filepath.Join(m.DataDir, "snapshot.json")
}
// Snapshot will persist a snapshot of the proxy manager state that
// can be restored with Restore.
//
// If DataDir is non-empty, then the Manager will automatically snapshot
// whenever the set of managed proxies changes. This method generally doesn't
// need to be called manually.
func (m *Manager) Snapshot(path string) error {
m.lock.Lock()
defer m.lock.Unlock()
return m.snapshot(path, false)
}
// snapshot is the internal function analogous to Snapshot but expects
// a lock to already be held.
//
// checkDup when set will store the snapshot on lastSnapshot and use
// reflect.DeepEqual to verify that it's not writing an identical snapshot.
func (m *Manager) snapshot(path string, checkDup bool) error {
// Build the snapshot
s := snapshot{
Version: snapshotVersion,
Proxies: make(map[string]snapshotProxy, len(m.proxies)),
}
for id, p := range m.proxies {
// Get the snapshot configuration. If the configuration is nil or
// empty then we don't persist this proxy.
config := p.MarshalSnapshot()
if len(config) == 0 {
continue
}
s.Proxies[id] = snapshotProxy{
Mode: proxyExecMode(p),
Config: config,
}
}
// Dup detection, if the snapshot is identical to the last, do nothing
if checkDup && reflect.DeepEqual(m.lastSnapshot, &s) {
return nil
}
// Encode as JSON
encoded, err := json.Marshal(&s)
if err != nil {
return err
}
// Write the file
err = file.WriteAtomic(path, encoded)
// If we are checking for dups and we had a successful write, store
// it so we don't rewrite the same value.
if checkDup && err == nil {
m.lastSnapshot = &s
}
return err
}
// Restore restores the manager state from a snapshot at path. If path
// doesn't exist, this does nothing and no error is returned.
//
// This restores proxy state but does not restore any Manager configuration
// such as DataDir, Logger, etc. All of those should be set _before_ Restore
// is called.
//
// Restore must be called before Run. Restore will immediately start
// supervising the restored processes but will not sync with the local
// state store until Run is called.
//
// If an error is returned the manager state is left untouched.
func (m *Manager) Restore(path string) error {
buf, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
var s snapshot
if err := json.Unmarshal(buf, &s); err != nil {
return err
}
// Verify the version matches so we can be more confident that we're
// decoding a structure that we expect.
if s.Version != snapshotVersion {
return fmt.Errorf("unknown snapshot version, expecting %d", snapshotVersion)
}
// Build the proxies from the snapshot
proxies := make(map[string]Proxy, len(s.Proxies))
for id, sp := range s.Proxies {
p, err := m.newProxyFromMode(sp.Mode, id)
if err != nil {
return err
}
// Unmarshal the proxy. If there is an error we just continue on and
// ignore it. Errors restoring proxies should be exceptionally rare
// and only under scenarios where the proxy isn't running anymore or
// we won't have permission to access it. We log and continue.
if err := p.UnmarshalSnapshot(sp.Config); err != nil {
m.Logger.Printf("[WARN] agent/proxy: error restoring proxy %q: %s", id, err)
continue
}
proxies[id] = p
}
// Overwrite the proxies. The documentation notes that this will happen.
m.lock.Lock()
defer m.lock.Unlock()
m.proxies = proxies
return nil
}
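// Illustrative usage sketch (hypothetical wiring; state and dataDir are
// placeholders): all configuration is set first, then Restore, then Run,
// matching the contract documented above.
//
//	m := NewManager()
//	m.State = state
//	m.DataDir = dataDir
//	if err := m.Restore(m.SnapshotPath()); err != nil {
//		m.Logger.Printf("[WARN] agent/proxy: error restoring snapshot: %s", err)
//	}
//	go m.Run()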

View File

@ -1,13 +0,0 @@
package proxyprocess
// defaultTestProxy is the test proxy that is instantiated for proxies with
// an execution mode of ProxyExecModeTest.
var defaultTestProxy = testProxy{}
// testProxy is a Proxy implementation that stores state in-memory and
// is only used for unit testing. It is in a non-_test.go file because the
// factory for initializing it (newProxy) is part of the package's non-test code.
type testProxy struct {
Start uint32
Stop uint32
}

View File

@ -1,11 +1,5 @@
package structs
import (
"fmt"
"github.com/mitchellh/mapstructure"
)
// ConnectAuthorizeRequest is the structure of a request to authorize
// a connection.
type ConnectAuthorizeRequest struct {
@ -21,121 +15,3 @@ type ConnectAuthorizeRequest struct {
ClientCertURI string
ClientCertSerial string
}
// ProxyExecMode encodes the mode for running a managed connect proxy.
type ProxyExecMode int
const (
// ProxyExecModeUnspecified uses the global default proxy mode.
ProxyExecModeUnspecified ProxyExecMode = iota
// ProxyExecModeDaemon executes a proxy process as a supervised daemon.
ProxyExecModeDaemon
// ProxyExecModeScript executes a proxy config script on each change to its
// config.
ProxyExecModeScript
// ProxyExecModeTest tracks the start/stop of the proxy in-memory
// and is only used for tests. This shouldn't be set outside of tests,
// but even if it is it has no external effect.
ProxyExecModeTest
)
// NewProxyExecMode returns the proper ProxyExecMode for the given string value.
func NewProxyExecMode(raw string) (ProxyExecMode, error) {
switch raw {
case "":
return ProxyExecModeUnspecified, nil
case "daemon":
return ProxyExecModeDaemon, nil
case "script":
return ProxyExecModeScript, nil
default:
return 0, fmt.Errorf("invalid exec mode: %s", raw)
}
}
// String implements Stringer
func (m ProxyExecMode) String() string {
switch m {
case ProxyExecModeUnspecified:
return "global_default"
case ProxyExecModeDaemon:
return "daemon"
case ProxyExecModeScript:
return "script"
case ProxyExecModeTest:
return "test"
default:
return "unknown"
}
}
// ConnectManagedProxy represents the agent-local state for a configured proxy
// instance. This is never stored or sent to the servers and is only used to
// store the config for the proxy that the agent needs to track. For now it's
// really generic with only the fields the agent needs to act on defined while
// the rest of the proxy config is passed as opaque bag of attributes to support
// arbitrary config params for third-party proxy integrations. "External"
// proxies by definition register themselves and manage their own config
// externally so are never represented in agent state.
type ConnectManagedProxy struct {
// ExecMode is one of daemon or script.
ExecMode ProxyExecMode
// Command is the command to execute. Empty defaults to self-invoking the same
// consul binary with proxy subcommand for ProxyExecModeDaemon and is an error
// for ProxyExecModeScript.
Command []string
// Config is the arbitrary configuration data provided with the registration.
Config map[string]interface{}
// Upstreams are the dependencies the proxy should setup outgoing listeners for.
Upstreams Upstreams
// ProxyService is a pointer to the local proxy's service record for
// convenience. The proxy's ID and name etc. can be read from there. It may be
// nil if the agent is starting up and hasn't registered the service yet. We
// ignore it when calculating the hash value since the only thing that affects
// the proxy's config is the ID of the target service which is already
// represented below.
ProxyService *NodeService `hash:"ignore"`
// TargetServiceID is the ID of the target service on the localhost. It may
// not exist yet since bootstrapping is allowed to happen in either order.
TargetServiceID string
}
// ConnectManagedProxyConfig represents the parts of the proxy config the agent
// needs to understand. It's bad UX to make the user specify these separately
// just to make parsing simpler for us so this encapsulates the fields in
// ConnectManagedProxy.Config that we care about. They are all optional anyway
// and this is used to decode them with mapstructure.
type ConnectManagedProxyConfig struct {
BindAddress string `mapstructure:"bind_address"`
BindPort int `mapstructure:"bind_port"`
LocalServiceAddress string `mapstructure:"local_service_address"`
LocalServicePort int `mapstructure:"local_service_port"`
}
// ParseConfig attempts to read the fields we care about from the otherwise
// opaque config map. They are all optional but it may fail if one is specified
// but an invalid value.
func (p *ConnectManagedProxy) ParseConfig() (*ConnectManagedProxyConfig, error) {
var cfg ConnectManagedProxyConfig
d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
ErrorUnused: false,
WeaklyTypedInput: true, // allow string port etc.
Result: &cfg,
})
if err != nil {
return nil, err
}
err = d.Decode(p.Config)
if err != nil {
return nil, err
}
return &cfg, nil
}
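// For illustration: because the decoder sets WeaklyTypedInput, a string port
// in the opaque config decodes cleanly (values here are hypothetical).
//
//	p := &ConnectManagedProxy{Config: map[string]interface{}{
//		"bind_address": "127.0.0.1",
//		"bind_port":    "8181", // string weakly coerced to int
//	}}
//	cfg, err := p.ParseConfig()
//	// err == nil, cfg.BindAddress == "127.0.0.1", cfg.BindPort == 8181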

View File

@ -1,115 +0,0 @@
package structs
import (
"reflect"
"testing"
)
func TestConnectManagedProxy_ParseConfig(t *testing.T) {
tests := []struct {
name string
config map[string]interface{}
want *ConnectManagedProxyConfig
wantErr bool
}{
{
name: "empty",
config: nil,
want: &ConnectManagedProxyConfig{},
wantErr: false,
},
{
name: "specified",
config: map[string]interface{}{
"bind_address": "127.0.0.1",
"bind_port": 1234,
},
want: &ConnectManagedProxyConfig{
BindAddress: "127.0.0.1",
BindPort: 1234,
},
wantErr: false,
},
{
name: "stringy port",
config: map[string]interface{}{
"bind_address": "127.0.0.1",
"bind_port": "1234",
},
want: &ConnectManagedProxyConfig{
BindAddress: "127.0.0.1",
BindPort: 1234,
},
wantErr: false,
},
{
name: "empty addr",
config: map[string]interface{}{
"bind_address": "",
"bind_port": "1234",
},
want: &ConnectManagedProxyConfig{
BindAddress: "",
BindPort: 1234,
},
wantErr: false,
},
{
name: "empty port",
config: map[string]interface{}{
"bind_address": "127.0.0.1",
"bind_port": "",
},
want: nil,
wantErr: true,
},
{
name: "junk address",
config: map[string]interface{}{
"bind_address": 42,
"bind_port": "",
},
want: nil,
wantErr: true,
},
{
name: "zero port, missing addr",
config: map[string]interface{}{
"bind_port": 0,
},
want: &ConnectManagedProxyConfig{
BindPort: 0,
},
wantErr: false,
},
{
name: "extra fields present",
config: map[string]interface{}{
"bind_port": 1234,
"flamingos": true,
"upstream": []map[string]interface{}{
{"foo": "bar"},
},
},
want: &ConnectManagedProxyConfig{
BindPort: 1234,
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &ConnectManagedProxy{
Config: tt.config,
}
got, err := p.ParseConfig()
if (err != nil) != tt.wantErr {
t.Errorf("ConnectManagedProxy.ParseConfig() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ConnectManagedProxy.ParseConfig() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -1,14 +1,7 @@
package structs
import (
"encoding/json"
"fmt"
"reflect"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/mapstructure"
"github.com/mitchellh/reflectwalk"
)
// ServiceDefinition is used to JSON decode the Service definitions. For
@ -27,19 +20,11 @@ type ServiceDefinition struct {
Weights *Weights
Token string
EnableTagOverride bool
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
// ProxyDestination is deprecated in favor of Proxy.DestinationServiceName
ProxyDestination string `json:",omitempty"`
// Proxy is the configuration set for Kind = connect-proxy. It is mandatory in
// that case and an error to be set for any other kind. This config is part of
// a proxy service definition and is distinct from but shares some fields with
// the Connect.Proxy which configures a managed proxy as part of the actual
// service's definition. This duplication is ugly but seemed better than the
// alternative which was to re-use the same struct fields for both cases even
// though the semantics are different and the non-shared fields make no sense
// in the other case. ProxyConfig may be a more natural name here, but it's
// confusing for the UX because one of the fields in ConnectProxyConfig is
// also called just "Config"
Proxy *ConnectProxyConfig
@ -69,10 +54,6 @@ func (s *ServiceDefinition) NodeService() *NodeService {
ns.Proxy.Upstreams[i].DestinationType = UpstreamDestTypeService
}
}
} else {
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
// Legacy convert ProxyDestination into a Proxy config
ns.Proxy.DestinationServiceName = s.ProxyDestination
}
if ns.ID == "" && ns.Service != "" {
ns.ID = ns.Service
@ -88,120 +69,6 @@ func (s *ServiceDefinition) NodeService() *NodeService {
return ns
}
// ConnectManagedProxy returns a ConnectManagedProxy from the ServiceDefinition
// if one is validly configured. Note that it may return nil if no proxy is
// configured, and it returns a nil error in that case too since it's an
// expected case. A non-nil error indicates that an attempt was made to
// configure a proxy but the input was invalid, e.g. an invalid
// "exec_mode".
func (s *ServiceDefinition) ConnectManagedProxy() (*ConnectManagedProxy, error) {
if s.Connect == nil || s.Connect.Proxy == nil {
return nil, nil
}
// NodeService performs some simple normalization like copying ID from Name
// which we shouldn't hard code ourselves here...
ns := s.NodeService()
execMode, err := NewProxyExecMode(s.Connect.Proxy.ExecMode)
if err != nil {
return nil, err
}
// If upstreams were set in the config and NOT in the actual Upstreams field,
// extract them out to the new explicit Upstreams and unset in config to make
// transition smooth.
if deprecatedUpstreams, ok := s.Connect.Proxy.Config["upstreams"]; ok {
if len(s.Connect.Proxy.Upstreams) == 0 {
if slice, ok := deprecatedUpstreams.([]interface{}); ok {
for _, raw := range slice {
var oldU deprecatedBuiltInProxyUpstreamConfig
var decMeta mapstructure.Metadata
decCfg := &mapstructure.DecoderConfig{
Metadata: &decMeta,
Result: &oldU,
}
dec, err := mapstructure.NewDecoder(decCfg)
if err != nil {
// Just skip it - we never used to parse this, so invalid input
// was never rejected until it hit the proxy. This is a best-effort
// attempt to not break existing service definitions, so it's not the
// end of the world if we don't have exactly the same failure mode
// for invalid input.
continue
}
err = dec.Decode(raw)
if err != nil {
// same logic as above
continue
}
newT := UpstreamDestTypeService
if oldU.DestinationType == "prepared_query" {
newT = UpstreamDestTypePreparedQuery
}
u := Upstream{
DestinationType: newT,
DestinationName: oldU.DestinationName,
DestinationNamespace: oldU.DestinationNamespace,
Datacenter: oldU.DestinationDatacenter,
LocalBindAddress: oldU.LocalBindAddress,
LocalBindPort: oldU.LocalBindPort,
}
// Any unrecognized keys should be copied into the config map
if len(decMeta.Unused) > 0 {
u.Config = make(map[string]interface{})
// Paranoid type assertion - mapstructure would have errored if this
// wasn't safe but panics are bad...
if rawMap, ok := raw.(map[string]interface{}); ok {
for _, k := range decMeta.Unused {
u.Config[k] = rawMap[k]
}
}
}
s.Connect.Proxy.Upstreams = append(s.Connect.Proxy.Upstreams, u)
}
}
}
// Remove upstreams even if we didn't add them for consistency.
delete(s.Connect.Proxy.Config, "upstreams")
}
p := &ConnectManagedProxy{
ExecMode: execMode,
Command: s.Connect.Proxy.Command,
Config: s.Connect.Proxy.Config,
Upstreams: s.Connect.Proxy.Upstreams,
// ProxyService will be setup when the agent registers the configured
// proxies and starts them etc.
TargetServiceID: ns.ID,
}
// Ensure the Upstream type is defaulted
for i := range p.Upstreams {
if p.Upstreams[i].DestinationType == "" {
p.Upstreams[i].DestinationType = UpstreamDestTypeService
}
}
return p, nil
}
// deprecatedBuiltInProxyUpstreamConfig is a struct for extracting old
// connect/proxy.UpstreamConfiguration syntax upstreams from existing managed
// proxy configs to convert them to new first-class Upstreams.
type deprecatedBuiltInProxyUpstreamConfig struct {
LocalBindAddress string `json:"local_bind_address" hcl:"local_bind_address,attr" mapstructure:"local_bind_address"`
LocalBindPort int `json:"local_bind_port" hcl:"local_bind_port,attr" mapstructure:"local_bind_port"`
DestinationName string `json:"destination_name" hcl:"destination_name,attr" mapstructure:"destination_name"`
DestinationNamespace string `json:"destination_namespace" hcl:"destination_namespace,attr" mapstructure:"destination_namespace"`
DestinationType string `json:"destination_type" hcl:"destination_type,attr" mapstructure:"destination_type"`
DestinationDatacenter string `json:"destination_datacenter" hcl:"destination_datacenter,attr" mapstructure:"destination_datacenter"`
// ConnectTimeoutMs is removed explicitly because any additional config we
// find including this field should be put into the opaque Config map in
// Upstream.
}
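// For illustration: a deprecated config-embedded upstream like
//
//	"upstreams": [{
//	  "destination_name":   "db",
//	  "destination_type":   "prepared_query",
//	  "local_bind_port":    9191,
//	  "connect_timeout_ms": 1000
//	}]
//
// is lifted by ConnectManagedProxy above into a first-class Upstream with
// DestinationType "prepared_query", DestinationName "db" and LocalBindPort
// 9191, while the unrecognized connect_timeout_ms key is preserved in
// Upstream.Config.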
// Validate validates the service definition. This also calls the underlying
// Validate method on the NodeService.
//
@ -210,22 +77,6 @@ type deprecatedBuiltInProxyUpstreamConfig struct {
func (s *ServiceDefinition) Validate() error {
var result error
if s.Kind == ServiceKindTypical {
if s.Connect != nil {
if s.Connect.Proxy != nil {
if s.Connect.Native {
result = multierror.Append(result, fmt.Errorf(
"Services that are Connect native may not have a proxy configuration"))
}
if s.Port == 0 {
result = multierror.Append(result, fmt.Errorf(
"Services with a Connect managed proxy must have a port set"))
}
}
}
}
// Validate the NodeService which covers a lot
if err := s.NodeService().Validate(); err != nil {
result = multierror.Append(result, err)
@ -250,177 +101,3 @@ func (s *ServiceDefinition) CheckTypes() (checks CheckTypes, err error) {
}
return checks, nil
}
// ServiceDefinitionConnectProxy is the connect proxy config within a service
// registration. Note this is duplicated in config.ServiceConnectProxy and needs
// to be kept in sync.
type ServiceDefinitionConnectProxy struct {
Command []string `json:",omitempty"`
ExecMode string `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
Upstreams []Upstream `json:",omitempty"`
}
func (p *ServiceDefinitionConnectProxy) MarshalJSON() ([]byte, error) {
type typeCopy ServiceDefinitionConnectProxy
copy := typeCopy(*p)
// If we have config, then we want to run it through our proxyConfigWalker
// which is a reflectwalk implementation that attempts to turn arbitrary
// interface{} values into JSON-safe equivalents (more or less). This
// should always work because the config input is either HCL or JSON and
// both are JSON compatible.
if copy.Config != nil {
configCopyRaw, err := copystructure.Copy(copy.Config)
if err != nil {
return nil, err
}
configCopy, ok := configCopyRaw.(map[string]interface{})
if !ok {
// This should never fail because we KNOW the input type,
// but we don't ever want to risk the panic.
return nil, fmt.Errorf("internal error: config copy is not right type")
}
if err := reflectwalk.Walk(configCopy, &proxyConfigWalker{}); err != nil {
return nil, err
}
copy.Config = configCopy
}
return json.Marshal(&copy)
}
var typMapIfaceIface = reflect.TypeOf(map[interface{}]interface{}{})
// proxyConfigWalker implements interfaces for the reflectwalk package
// (github.com/mitchellh/reflectwalk) that can be used to automatically
// make the proxy configuration safe for JSON usage.
//
// Most of the implementation here is just keeping track of where we are
// in the reflectwalk process, so that we can replace values. The key logic
// is in Slice() and SliceElem().
//
// In particular we're looking to replace two cases the msgpack codec causes:
//
// 1.) String values get turned into byte slices. JSON will base64-encode
// this and we don't want that, so we convert them back to strings.
//
// 2.) Nested maps turn into map[interface{}]interface{}. JSON cannot
// encode this, so we need to turn it back into map[string]interface{}.
//
// This is tested via the TestServiceDefinitionConnectProxy_json test.
type proxyConfigWalker struct {
lastValue reflect.Value // lastValue of map, required for replacement
loc, lastLoc reflectwalk.Location // locations
cs []reflect.Value // container stack
csKey []reflect.Value // container keys (maps) stack
csData interface{} // current container data
sliceIndex []int // slice index stack (one for each slice in cs)
}
func (w *proxyConfigWalker) Enter(loc reflectwalk.Location) error {
w.lastLoc = w.loc
w.loc = loc
return nil
}
func (w *proxyConfigWalker) Exit(loc reflectwalk.Location) error {
w.loc = reflectwalk.None
w.lastLoc = reflectwalk.None
switch loc {
case reflectwalk.Map:
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
w.csKey = w.csKey[:len(w.csKey)-1]
case reflectwalk.Slice:
// Split any values that need to be split
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.SliceElem:
w.csKey = w.csKey[:len(w.csKey)-1]
w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
}
return nil
}
func (w *proxyConfigWalker) Map(m reflect.Value) error {
w.cs = append(w.cs, m)
return nil
}
func (w *proxyConfigWalker) MapElem(m, k, v reflect.Value) error {
w.csData = k
w.csKey = append(w.csKey, k)
w.lastValue = v
return nil
}
func (w *proxyConfigWalker) Slice(v reflect.Value) error {
// If we find a []byte slice, it is an HCL-string converted to []byte.
// Convert it back to a Go string and replace the value so that JSON
// doesn't base64-encode it.
if v.Type() == reflect.TypeOf([]byte{}) {
resultVal := reflect.ValueOf(string(v.Interface().([]byte)))
switch w.lastLoc {
case reflectwalk.MapKey:
m := w.cs[len(w.cs)-1]
// Delete the old value
var zero reflect.Value
m.SetMapIndex(w.csData.(reflect.Value), zero)
// Set the new key with the existing value
m.SetMapIndex(resultVal, w.lastValue)
// Set the key to be the new key
w.csData = resultVal
case reflectwalk.MapValue:
// If we're in a map, then the only way to set a map value is
// to set it directly.
m := w.cs[len(w.cs)-1]
mk := w.csData.(reflect.Value)
m.SetMapIndex(mk, resultVal)
case reflectwalk.Slice:
s := w.cs[len(w.cs)-1]
s.Index(w.sliceIndex[len(w.sliceIndex)-1]).Set(resultVal)
default:
return fmt.Errorf("cannot convert []byte")
}
}
w.cs = append(w.cs, v)
return nil
}
func (w *proxyConfigWalker) SliceElem(i int, elem reflect.Value) error {
w.csKey = append(w.csKey, reflect.ValueOf(i))
w.sliceIndex = append(w.sliceIndex, i)
// We're looking specifically for map[interface{}]interface{}, but the
// values in a slice are wrapped up in interface{} so we need to unwrap
// that first. Therefore, we do three checks: 1.) is it valid? so we
// don't panic, 2.) is it an interface{}? so we can unwrap it and 3.)
// after unwrapping the interface do we have the map we expect?
if !elem.IsValid() {
return nil
}
if elem.Kind() != reflect.Interface {
return nil
}
if inner := elem.Elem(); inner.Type() == typMapIfaceIface {
// map[interface{}]interface{}, attempt to weakly decode into string keys
var target map[string]interface{}
if err := mapstructure.WeakDecode(inner.Interface(), &target); err != nil {
return err
}
elem.Set(reflect.ValueOf(target))
}
return nil
}
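// Illustrative sketch (not in the original file): the two msgpack artifacts
// the walker fixes, end to end.
//
//	cfg := map[string]interface{}{
//		"key": []byte("value"), // HCL string decoded as []byte
//		"nested": []interface{}{
//			map[interface{}]interface{}{"a": 1}, // not JSON-encodable
//		},
//	}
//	_ = reflectwalk.Walk(cfg, &proxyConfigWalker{})
//	// cfg now holds "value" as a string and the nested map keyed by strings,
//	// so json.Marshal(cfg) yields {"key":"value","nested":[{"a":1}]}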

View File

@ -1,7 +1,6 @@
package structs
import (
"encoding/json"
"fmt"
"strings"
"testing"
@ -69,40 +68,6 @@ func TestServiceDefinitionValidate(t *testing.T) {
func(x *ServiceDefinition) {},
"",
},
{
"managed proxy with a port set",
func(x *ServiceDefinition) {
x.Port = 8080
x.Connect = &ServiceConnect{
Proxy: &ServiceDefinitionConnectProxy{},
}
},
"",
},
{
"managed proxy with no port set",
func(x *ServiceDefinition) {
x.Port = 0 // Explicitly unset this as the test default sets it sanely
x.Connect = &ServiceConnect{
Proxy: &ServiceDefinitionConnectProxy{},
}
},
"must have a port",
},
{
"managed proxy with native set",
func(x *ServiceDefinition) {
x.Port = 8080
x.Connect = &ServiceConnect{
Native: true,
Proxy: &ServiceDefinitionConnectProxy{},
}
},
"may not have a proxy",
},
}
for _, tc := range cases {
@ -121,86 +86,3 @@ func TestServiceDefinitionValidate(t *testing.T) {
})
}
}
func TestServiceDefinitionConnectProxy_json(t *testing.T) {
cases := []struct {
Name string
Input *ServiceDefinitionConnectProxy
Expected string
Err string
}{
{
"no config",
&ServiceDefinitionConnectProxy{
Command: []string{"foo"},
ExecMode: "bar",
},
`
{
"Command": [
"foo"
],
"ExecMode": "bar"
}
`,
"",
},
{
"basic config",
&ServiceDefinitionConnectProxy{
Config: map[string]interface{}{
"foo": "bar",
},
},
`
{
"Config": {
"foo": "bar"
}
}
`,
"",
},
{
"config with upstreams",
&ServiceDefinitionConnectProxy{
Config: map[string]interface{}{
"upstreams": []interface{}{
map[interface{}]interface{}{
"key": []byte("value"),
},
},
},
},
`
{
"Config": {
"upstreams": [
{
"key": "value"
}
]
}
}
`,
"",
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
require := require.New(t)
result, err := json.MarshalIndent(tc.Input, "", "\t")
t.Logf("error: %s", err)
require.Equal(err != nil, tc.Err != "")
if err != nil {
require.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.Err))
return
}
require.Equal(strings.TrimSpace(tc.Expected), strings.TrimSpace(string(result)))
})
}
}

View File

@ -673,10 +673,8 @@ type ServiceNode struct {
ServiceMeta map[string]string
ServicePort int
ServiceEnableTagOverride bool
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
ServiceProxyDestination string `bexpr:"-"`
ServiceProxy ConnectProxyConfig
ServiceConnect ServiceConnect
RaftIndex `bexpr:"-"`
}
@ -714,10 +712,8 @@ func (s *ServiceNode) PartialClone() *ServiceNode {
ServiceMeta: nsmeta,
ServiceWeights: s.ServiceWeights,
ServiceEnableTagOverride: s.ServiceEnableTagOverride,
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
ServiceProxyDestination: s.ServiceProxyDestination,
ServiceProxy: s.ServiceProxy,
ServiceConnect: s.ServiceConnect,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
@ -817,29 +813,10 @@ type NodeService struct {
Weights *Weights
EnableTagOverride bool
// ProxyDestination is DEPRECATED in favor of Proxy.DestinationServiceName.
// It's retained since this struct is used to parse input for
// /catalog/register but nothing else internal should use it - once
// request/config definitions are parsed all internal uses of NodeService
// should have this empty and use the Proxy.DestinationServiceName field
// below.
//
// It used to store the name of the service that this service is a Connect
// proxy for. This is only valid if Kind is "connect-proxy". The destination
// may be a service that isn't present in the catalog. This is expected and
// allowed to allow for proxies to come up earlier than their target services.
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
ProxyDestination string `bexpr:"-"`
// Proxy is the configuration set for Kind = connect-proxy. It is mandatory in
// that case and an error to be set for any other kind. This config is part of
// a proxy service definition and is distinct from but shares some fields with
// the Connect.Proxy which configures a managed proxy as part of the actual
// service's definition. This duplication is ugly but seemed better than the
// alternative which was to re-use the same struct fields for both cases even
// though the semantics are different and the non-shared fields make no sense
// in the other case. ProxyConfig may be a more natural name here, but it's
// confusing for the UX because one of the fields in ConnectProxyConfig is
// also called just "Config"
Proxy ConnectProxyConfig
@ -890,12 +867,6 @@ type ServiceConnect struct {
// Native is true when this service can natively understand Connect.
Native bool `json:",omitempty"`
// DEPRECATED(managed-proxies) - Remove with the rest of managed proxies
// Proxy configures a connect proxy instance for the service. This is
// only used for agent service definitions and is invalid for non-agent
// (catalog API) definitions.
Proxy *ServiceDefinitionConnectProxy `json:",omitempty" bexpr:"-"`
// SidecarService is a nested Service Definition to register at the same time.
// It's purely a convenience mechanism to allow specifying a sidecar service
// along with the application service definition. It's nested nature allows
@ -927,13 +898,6 @@ func (s *NodeService) Validate() error {
// ConnectProxy validation
if s.Kind == ServiceKindConnectProxy {
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
// Fixup legacy requests that specify the ProxyDestination still
if s.ProxyDestination != "" && s.Proxy.DestinationServiceName == "" {
s.Proxy.DestinationServiceName = s.ProxyDestination
s.ProxyDestination = ""
}
if strings.TrimSpace(s.Proxy.DestinationServiceName) == "" {
result = multierror.Append(result, fmt.Errorf(
"Proxy.DestinationServiceName must be non-empty for Connect proxy "+
@ -996,10 +960,6 @@ func (s *NodeService) Validate() error {
result = multierror.Append(result, fmt.Errorf("Mesh Gateways cannot have a sidecar service defined"))
}
if s.Connect.Proxy != nil {
result = multierror.Append(result, fmt.Errorf("The Connect.Proxy configuration is invalid for Mesh Gateways"))
}
if s.Proxy.DestinationServiceName != "" {
result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceName configuration is invalid for Mesh Gateways"))
}
@ -1033,10 +993,6 @@ func (s *NodeService) Validate() error {
result = multierror.Append(result, fmt.Errorf(
"A SidecarService cannot have a nested SidecarService"))
}
if s.Connect.SidecarService.Connect.Proxy != nil {
result = multierror.Append(result, fmt.Errorf(
"A SidecarService cannot have a managed proxy"))
}
}
}
@ -1091,7 +1047,6 @@ func (s *ServiceNode) IsSameService(other *ServiceNode) bool {
!reflect.DeepEqual(s.ServiceMeta, other.ServiceMeta) ||
!reflect.DeepEqual(s.ServiceWeights, other.ServiceWeights) ||
s.ServiceEnableTagOverride != other.ServiceEnableTagOverride ||
s.ServiceProxyDestination != other.ServiceProxyDestination ||
!reflect.DeepEqual(s.ServiceProxy, other.ServiceProxy) ||
!reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) {
return false
@ -1111,11 +1066,6 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode {
theWeights = *s.Weights
}
}
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
legacyProxyDest := s.Proxy.DestinationServiceName
if legacyProxyDest == "" {
legacyProxyDest = s.ProxyDestination
}
return &ServiceNode{
// Skip ID, see ServiceNode definition.
Node: node,
@ -1132,7 +1082,6 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode {
ServiceWeights: theWeights,
ServiceEnableTagOverride: s.EnableTagOverride,
ServiceProxy: s.Proxy,
ServiceProxyDestination: legacyProxyDest,
ServiceConnect: s.Connect,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,

View File

@ -165,12 +165,6 @@ func testServiceNode(t *testing.T) *ServiceNode {
ModifyIndex: 2,
},
ServiceProxy: TestConnectProxyConfig(t),
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
// ServiceProxyDestination is deprecated but must be set consistently with
// the value of ServiceProxy.DestinationServiceName otherwise a round-trip
// through ServiceNode -> NodeService and back will not match and fail
// tests.
ServiceProxyDestination: "web",
ServiceConnect: ServiceConnect{
Native: true,
},
@ -249,7 +243,6 @@ func TestStructs_ServiceNode_IsSameService(t *testing.T) {
serviceTags := sn.ServiceTags
serviceWeights := Weights{Passing: 2, Warning: 1}
sn.ServiceWeights = serviceWeights
serviceProxyDestination := sn.ServiceProxyDestination
serviceProxy := sn.ServiceProxy
serviceConnect := sn.ServiceConnect
serviceTaggedAddresses := sn.ServiceTaggedAddresses
@ -282,7 +275,6 @@ func TestStructs_ServiceNode_IsSameService(t *testing.T) {
check(func() { other.ServiceMeta = map[string]string{"my": "meta"} }, func() { other.ServiceMeta = serviceMeta })
check(func() { other.ServiceName = "duck" }, func() { other.ServiceName = serviceName })
check(func() { other.ServicePort = 65534 }, func() { other.ServicePort = servicePort })
check(func() { other.ServiceProxyDestination = "duck" }, func() { other.ServiceProxyDestination = serviceProxyDestination })
check(func() { other.ServiceTags = []string{"new", "tags"} }, func() { other.ServiceTags = serviceTags })
check(func() { other.ServiceWeights = Weights{Passing: 42, Warning: 41} }, func() { other.ServiceWeights = serviceWeights })
check(func() { other.ServiceProxy = ConnectProxyConfig{} }, func() { other.ServiceProxy = serviceProxy })
@ -385,10 +377,6 @@ func TestStructs_NodeService_ValidateMeshGateway(t *testing.T) {
func(x *NodeService) { x.Connect.SidecarService = &ServiceDefinition{} },
"cannot have a sidecar service",
},
"connect-managed-proxy": testCase{
func(x *NodeService) { x.Connect.Proxy = &ServiceDefinitionConnectProxy{} },
"Connect.Proxy configuration is invalid",
},
"proxy-destination-name": testCase{
func(x *NodeService) { x.Proxy.DestinationServiceName = "foo" },
"Proxy.DestinationServiceName configuration is invalid",
@ -439,19 +427,19 @@ func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) {
},
{
"connect-proxy: no ProxyDestination",
"connect-proxy: no Proxy.DestinationServiceName",
func(x *NodeService) { x.Proxy.DestinationServiceName = "" },
"Proxy.DestinationServiceName must be",
},
{
"connect-proxy: whitespace ProxyDestination",
"connect-proxy: whitespace Proxy.DestinationServiceName",
func(x *NodeService) { x.Proxy.DestinationServiceName = " " },
"Proxy.DestinationServiceName must be",
},
{
"connect-proxy: valid ProxyDestination",
"connect-proxy: valid Proxy.DestinationServiceName",
func(x *NodeService) { x.Proxy.DestinationServiceName = "hello" },
"",
},
@ -713,16 +701,6 @@ func TestStructs_NodeService_ValidateSidecarService(t *testing.T) {
},
"SidecarService cannot have a nested SidecarService",
},
{
"Sidecar can't have managed proxy",
func(x *NodeService) {
x.Connect.SidecarService.Connect = &ServiceConnect{
Proxy: &ServiceDefinitionConnectProxy{},
}
},
"SidecarService cannot have a managed proxy",
},
}
for _, tc := range cases {

View File

@ -377,9 +377,6 @@ func TestConfig(sources ...config.Source) *config.RuntimeConfig {
fmt.Println("WARNING:", w)
}
// Disable connect proxy execution since it causes all kinds of problems with
// self-executing tests etc.
cfg.ConnectTestDisableManagedProxies = true
// Effectively disables the delay after root rotation before requesting CSRs
// to make tests deterministic. 0 results in default jitter being applied but a
// tiny delay is effectively the same.

View File

@ -30,23 +30,6 @@ const (
ServiceKindMeshGateway ServiceKind = "mesh-gateway"
)
// ProxyExecMode is the execution mode for a managed Connect proxy.
type ProxyExecMode string
const (
// ProxyExecModeDaemon indicates that the proxy command should be long-running
// and should be started and supervised by the agent until it's target service
// is deregistered.
ProxyExecModeDaemon ProxyExecMode = "daemon"
// ProxyExecModeScript indicates that the proxy command should be invoked to
// completion on each change to the configuration or lifecycle event. The
// script typically fetches the config and certificates from the agent API and
// then configures an externally managed daemon, perhaps starting and stopping
// it if necessary.
ProxyExecModeScript ProxyExecMode = "script"
)
// UpstreamDestType is the type of upstream discovery mechanism.
type UpstreamDestType string
@ -90,13 +73,11 @@ type AgentService struct {
TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
Weights AgentWeights
EnableTagOverride bool
CreateIndex uint64 `json:",omitempty" bexpr:"-"`
ModifyIndex uint64 `json:",omitempty" bexpr:"-"`
ContentHash string `json:",omitempty" bexpr:"-"`
// DEPRECATED (ProxyDestination) - remove this field
ProxyDestination string `json:",omitempty" bexpr:"-"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
// AgentServiceChecksInfo returns information about a Service and its checks
@ -109,19 +90,9 @@ type AgentServiceChecksInfo struct {
// AgentServiceConnect represents the Connect configuration of a service.
type AgentServiceConnect struct {
Native bool `json:",omitempty"`
Proxy *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
}
// AgentServiceConnectProxy represents the Connect Proxy configuration of a
// service.
type AgentServiceConnectProxy struct {
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
Upstreams []Upstream `json:",omitempty"`
}
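// Illustrative sketch (uses the deprecated types removed in this commit): how
// a managed proxy was registered through the api package. The client variable
// is a hypothetical *Client.
//
//	reg := &AgentServiceRegistration{
//		Name: "web",
//		Port: 8080,
//		Connect: &AgentServiceConnect{
//			Proxy: &AgentServiceConnectProxy{
//				ExecMode: ProxyExecModeDaemon,
//				Command:  []string{"consul", "connect", "proxy"},
//				Config:   map[string]interface{}{"foo": "bar"},
//			},
//		},
//	}
//	err := client.Agent().ServiceRegister(reg)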
// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
// ServiceDefinition or response.
type AgentServiceConnectProxyConfig struct {
@ -176,10 +147,8 @@ type AgentServiceRegistration struct {
Weights *AgentWeights `json:",omitempty"`
Check *AgentServiceCheck
Checks AgentServiceChecks
// DEPRECATED (ProxyDestination) - remove this field
ProxyDestination string `json:",omitempty"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
// AgentCheckRegistration is used to register a new check
@ -284,12 +253,8 @@ type ConnectProxyConfig struct {
TargetServiceID string
TargetServiceName string
ContentHash string
// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
// but they don't need ExecMode or Command
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{} `bexpr:"-"`
Upstreams []Upstream
}
// Upstream is the response structure for a proxy upstream configuration.
@ -824,31 +789,6 @@ func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *Qu
return &out, qm, nil
}
// ConnectProxyConfig gets the configuration for a local managed proxy instance.
//
// Note that this uses an unconventional blocking mechanism since it's
// agent-local state. That means there is no persistent raft index so we block
// based on object hash instead.
func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out ConnectProxyConfig
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
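// Illustrative sketch: hash-based blocking against this endpoint. Passing the
// previous ContentHash back via QueryOptions.WaitHash blocks until the
// agent-local config changes (the loop wiring here is hypothetical).
//
//	var lastHash string
//	for {
//		cfg, qm, err := agent.ConnectProxyConfig("web-proxy", &QueryOptions{WaitHash: lastHash})
//		if err != nil {
//			break
//		}
//		lastHash = qm.LastContentHash
//		_ = cfg // react to the updated config
//	}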
// EnableServiceMaintenance toggles service maintenance mode on
// for the given service ID.
func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {

View File

@ -274,144 +274,6 @@ func TestAPI_AgentServicesWithFilter(t *testing.T) {
require.True(t, ok)
}
func TestAPI_AgentServices_ManagedConnectProxy(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Tags: []string{"bar", "baz"},
Port: 8000,
Check: &AgentServiceCheck{
TTL: "15s",
},
Connect: &AgentServiceConnect{
Proxy: &AgentServiceConnectProxy{
ExecMode: ProxyExecModeScript,
Command: []string{"foo.rb"},
Config: map[string]interface{}{
"foo": "bar",
},
Upstreams: []Upstream{{
DestinationType: "prepared_query",
DestinationName: "bar",
LocalBindPort: 9191,
}},
},
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
services, err := agent.Services()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := services["foo"]; !ok {
t.Fatalf("missing service: %v", services)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
chk, ok := checks["service:foo"]
if !ok {
t.Fatalf("missing check: %v", checks)
}
// Checks should default to critical
if chk.Status != HealthCritical {
t.Fatalf("Bad: %#v", chk)
}
// Proxy config should be correct
require.Equal(t, reg.Connect, services["foo"].Connect)
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAPI_AgentServices_ManagedConnectProxyDeprecatedUpstreams(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
agent := c.Agent()
s.WaitForSerfCheck(t)
reg := &AgentServiceRegistration{
Name: "foo",
Tags: []string{"bar", "baz"},
Port: 8000,
Check: &AgentServiceCheck{
TTL: "15s",
},
Connect: &AgentServiceConnect{
Proxy: &AgentServiceConnectProxy{
ExecMode: ProxyExecModeScript,
Command: []string{"foo.rb"},
Config: map[string]interface{}{
"foo": "bar",
"upstreams": []interface{}{
map[string]interface{}{
"destination_type": "prepared_query",
"destination_name": "bar",
"local_bind_port": 9191,
"connect_timeout_ms": 1000,
},
},
},
},
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
services, err := agent.Services()
if err != nil {
t.Fatalf("err: %v", err)
}
if _, ok := services["foo"]; !ok {
t.Fatalf("missing service: %v", services)
}
checks, err := agent.Checks()
if err != nil {
t.Fatalf("err: %v", err)
}
chk, ok := checks["service:foo"]
if !ok {
t.Fatalf("missing check: %v", checks)
}
// Checks should default to critical
if chk.Status != HealthCritical {
t.Fatalf("Bad: %#v", chk)
}
// Proxy config should be present in response, minus the upstreams
delete(reg.Connect.Proxy.Config, "upstreams")
// Upstreams should be translated into proper field
reg.Connect.Proxy.Upstreams = []Upstream{{
DestinationType: "prepared_query",
DestinationName: "bar",
LocalBindPort: 9191,
Config: map[string]interface{}{
"connect_timeout_ms": float64(1000),
},
}}
require.Equal(t, reg.Connect, services["foo"].Connect)
if err := agent.ServiceDeregister("foo"); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestAPI_AgentServices_SidecarService(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
@ -779,7 +641,7 @@ func TestAPI_AgentService(t *testing.T) {
ID: "foo",
Service: "foo",
Tags: []string{"bar", "baz"},
ContentHash: "325d9e4891696c34",
ContentHash: "6b13684bfe179e67",
Port: 8000,
Weights: AgentWeights{
Passing: 1,
@ -1540,55 +1402,6 @@ func TestAPI_AgentConnectAuthorize(t *testing.T) {
require.Equal(auth.Reason, "ACLs disabled, access is allowed by default")
}
func TestAPI_AgentConnectProxyConfig(t *testing.T) {
t.Parallel()
c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) {
// Force auto port range to 1 port so we have deterministic response.
c.Ports.ProxyMinPort = 20000
c.Ports.ProxyMaxPort = 20000
})
defer s.Stop()
agent := c.Agent()
reg := &AgentServiceRegistration{
Name: "foo",
Tags: []string{"bar", "baz"},
Port: 8000,
Connect: &AgentServiceConnect{
Proxy: &AgentServiceConnectProxy{
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"foo": "bar",
},
Upstreams: testUpstreams(t),
},
},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
config, qm, err := agent.ConnectProxyConfig("foo-proxy", nil)
require.NoError(t, err)
expectConfig := &ConnectProxyConfig{
ProxyServiceID: "foo-proxy",
TargetServiceID: "foo",
TargetServiceName: "foo",
ContentHash: "b58a7e24130d3058",
ExecMode: "daemon",
Command: []string{"consul", "connect", "proxy"},
Config: map[string]interface{}{
"bind_address": "127.0.0.1",
"bind_port": float64(20000),
"foo": "bar",
"local_service_address": "127.0.0.1:8000",
},
Upstreams: testExpectUpstreamsWithDefaults(t, reg.Connect.Proxy.Upstreams),
}
require.Equal(t, expectConfig, config)
require.Equal(t, expectConfig.ContentHash, qm.LastContentHash)
}
func TestAPI_AgentHealthService(t *testing.T) {
t.Parallel()
c, s := makeClient(t)

View File

@ -42,12 +42,10 @@ type CatalogService struct {
ServicePort int
ServiceWeights Weights
ServiceEnableTagOverride bool
// DEPRECATED (ProxyDestination) - remove the next comment!
// We forgot to ever add ServiceProxyDestination here so no need to deprecate!
ServiceProxy *AgentServiceConnectProxyConfig
CreateIndex uint64
Checks HealthChecks
ModifyIndex uint64
ServiceProxy *AgentServiceConnectProxyConfig
CreateIndex uint64
Checks HealthChecks
ModifyIndex uint64
}
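
For context, a minimal sketch of reading a proxy's destination from the catalog with the surviving `ServiceProxy` field; the service name is illustrative and the imports (`fmt`, the api package) are assumed:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// proxyDestinations prints each instance's destination via ServiceProxy;
// ServiceProxyDestination was never added to CatalogService, so callers
// lose nothing with this change.
func proxyDestinations(client *api.Client) error {
	svcs, _, err := client.Catalog().Service("web-sidecar-proxy", "", nil)
	if err != nil {
		return err
	}
	for _, s := range svcs {
		if s.ServiceProxy != nil {
			fmt.Printf("%s -> %s\n", s.ServiceID, s.ServiceProxy.DestinationServiceName)
		}
	}
	return nil
}
```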
type CatalogNode struct {

View File

@ -532,11 +532,6 @@ func TestAPI_CatalogConnect(t *testing.T) {
proxy := proxyReg.Service
// DEPRECATED (ProxyDestination) - remove this case when the field is removed
deprecatedProxyReg := testUnmanagedProxyRegistration(t)
deprecatedProxyReg.Service.ProxyDestination = deprecatedProxyReg.Service.Proxy.DestinationServiceName
deprecatedProxyReg.Service.Proxy = nil
service := &AgentService{
ID: proxyReg.Service.Proxy.DestinationServiceID,
Service: proxyReg.Service.Proxy.DestinationServiceName,
@ -563,10 +558,6 @@ func TestAPI_CatalogConnect(t *testing.T) {
if _, err := catalog.Register(reg, nil); err != nil {
r.Fatal(err)
}
// First try to register deprecated proxy, shouldn't error
if _, err := catalog.Register(deprecatedProxyReg, nil); err != nil {
r.Fatal(err)
}
if _, err := catalog.Register(proxyReg, nil); err != nil {
r.Fatal(err)
}

View File

@ -16,17 +16,16 @@ var watchFuncFactory map[string]watchFactory
func init() {
watchFuncFactory = map[string]watchFactory{
"key": keyWatch,
"keyprefix": keyPrefixWatch,
"services": servicesWatch,
"nodes": nodesWatch,
"service": serviceWatch,
"checks": checksWatch,
"event": eventWatch,
"connect_roots": connectRootsWatch,
"connect_leaf": connectLeafWatch,
"connect_proxy_config": connectProxyConfigWatch,
"agent_service": agentServiceWatch,
"key": keyWatch,
"keyprefix": keyPrefixWatch,
"services": servicesWatch,
"nodes": nodesWatch,
"service": serviceWatch,
"checks": checksWatch,
"event": eventWatch,
"connect_roots": connectRootsWatch,
"connect_leaf": connectLeafWatch,
"agent_service": agentServiceWatch,
}
}
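
With `connect_proxy_config` gone from this map, agent-local watching goes through the `agent_service` type instead, which uses the same hash-based blocking. A minimal sketch, assuming a local agent and an illustrative service ID:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/api/watch"
)

func main() {
	plan, err := watch.Parse(map[string]interface{}{
		"type":       "agent_service",
		"service_id": "web-sidecar-proxy",
	})
	if err != nil {
		log.Fatal(err)
	}
	plan.HybridHandler = func(val watch.BlockingParamVal, raw interface{}) {
		// agent_service watches deliver *api.AgentService values.
		if svc, ok := raw.(*api.AgentService); ok && svc != nil {
			log.Printf("service updated, hash %s", svc.ContentHash)
		}
	}
	if err := plan.Run("127.0.0.1:8500"); err != nil {
		log.Fatal(err)
	}
}
```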
@ -281,33 +280,6 @@ func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) {
return fn, nil
}
// connectProxyConfigWatch is used to watch for changes to Connect managed proxy
// configuration. Note that this state is agent-local so the watch mechanism
// uses `hash` rather than `index` for deciding whether to block.
func connectProxyConfigWatch(params map[string]interface{}) (WatcherFunc, error) {
// We don't support consistency modes since it's agent local data
var proxyServiceID string
if err := assignValue(params, "proxy_service_id", &proxyServiceID); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
agent := p.client.Agent()
opts := makeQueryOptionsWithContext(p, false)
defer p.cancelFunc()
config, _, err := agent.ConnectProxyConfig(proxyServiceID, &opts)
if err != nil {
return nil, nil, err
}
// Return string ContentHash since we don't have Raft indexes to block on.
return WaitHashVal(config.ContentHash), config, err
}
return fn, nil
}
// agentServiceWatch is used to watch for changes to a single service instance
// on the local agent. Note that this state is agent-local so the watch
// mechanism uses `hash` rather than `index` for deciding whether to block.

View File

@ -823,69 +823,6 @@ func TestConnectLeafWatch(t *testing.T) {
wg.Wait()
}
func TestConnectProxyConfigWatch(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
// Register a local agent service with a managed proxy
reg := &api.AgentServiceRegistration{
Name: "web",
Port: 8080,
Connect: &api.AgentServiceConnect{
Proxy: &api.AgentServiceConnectProxy{
Config: map[string]interface{}{
"foo": "bar",
},
},
},
}
agent := c.Agent()
err := agent.ServiceRegister(reg)
require.NoError(t, err)
invoke := makeInvokeCh()
plan := mustParse(t, `{"type":"connect_proxy_config", "proxy_service_id":"web-proxy"}`)
plan.HybridHandler = func(blockParamVal watch.BlockingParamVal, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.(*api.ConnectProxyConfig)
if !ok || v == nil {
return // ignore
}
invoke <- nil
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(20 * time.Millisecond)
// Change the proxy's config
reg.Connect.Proxy.Config["foo"] = "buzz"
reg.Connect.Proxy.Config["baz"] = "qux"
err := agent.ServiceRegister(reg)
require.NoError(t, err)
}()
wg.Add(1)
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Fatalf("err: %v", err)
}
}()
if err := <-invoke; err != nil {
t.Fatalf("err: %v", err)
}
plan.Stop()
wg.Wait()
}
func TestAgentServiceWatch(t *testing.T) {
t.Parallel()
c, s := makeClient(t)

View File

@ -12,7 +12,6 @@ import (
"github.com/mitchellh/mapstructure"
proxyAgent "github.com/hashicorp/consul/agent/proxyprocess"
"github.com/hashicorp/consul/agent/xds"
"github.com/hashicorp/consul/api"
proxyCmd "github.com/hashicorp/consul/command/connect/proxy"
@ -212,21 +211,14 @@ func (c *cmd) Run(args []string) int {
// Load the proxy ID and token from env vars if they're set
if c.proxyID == "" {
c.proxyID = os.Getenv(proxyAgent.EnvProxyID)
c.proxyID = os.Getenv("CONNECT_PROXY_ID")
}
if c.sidecarFor == "" {
c.sidecarFor = os.Getenv(proxyAgent.EnvSidecarFor)
c.sidecarFor = os.Getenv("CONNECT_SIDECAR_FOR")
}
if c.grpcAddr == "" {
c.grpcAddr = os.Getenv(api.GRPCAddrEnvName)
}
if c.http.Token() == "" && c.http.TokenFile() == "" {
// Extra check needed since CONSUL_HTTP_TOKEN has not been consulted yet but
// calling SetToken with empty will force that to override the
if proxyToken := os.Getenv(proxyAgent.EnvProxyToken); proxyToken != "" {
c.http.SetToken(proxyToken)
}
}
// Setup Consul client
client, err := c.http.APIClient()

View File

@ -13,7 +13,6 @@ import (
"strconv"
"strings"
proxyAgent "github.com/hashicorp/consul/agent/proxyprocess"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
proxyImpl "github.com/hashicorp/consul/connect/proxy"
@ -124,13 +123,10 @@ func (c *cmd) Run(args []string) int {
// Load the proxy ID and token from env vars if they're set
if c.proxyID == "" {
c.proxyID = os.Getenv(proxyAgent.EnvProxyID)
c.proxyID = os.Getenv("CONNECT_PROXY_ID")
}
if c.sidecarFor == "" {
c.sidecarFor = os.Getenv(proxyAgent.EnvSidecarFor)
}
if c.http.Token() == "" && c.http.TokenFile() == "" {
c.http.SetToken(os.Getenv(proxyAgent.EnvProxyToken))
c.sidecarFor = os.Getenv("CONNECT_SIDECAR_FOR")
}
// Setup the log outputs

View File

@ -79,104 +79,6 @@ func TestUpstreamResolverFuncFromClient(t *testing.T) {
}
}
func TestAgentConfigWatcherManagedProxy(t *testing.T) {
t.Parallel()
a := agent.NewTestAgent(t, "agent_smith", `
connect {
enabled = true
proxy {
allow_managed_api_registration = true
}
}
`)
defer a.Shutdown()
client := a.Client()
agent := client.Agent()
// Register a local agent service with a managed proxy
reg := &api.AgentServiceRegistration{
Name: "web",
Port: 8080,
Connect: &api.AgentServiceConnect{
Proxy: &api.AgentServiceConnectProxy{
Config: map[string]interface{}{
"bind_address": "10.10.10.10",
"bind_port": 1010,
"local_service_address": "127.0.0.1:5000",
"handshake_timeout_ms": 999,
},
Upstreams: []api.Upstream{
{
DestinationName: "db",
LocalBindPort: 9191,
},
},
},
},
}
err := agent.ServiceRegister(reg)
require.NoError(t, err)
w, err := NewAgentConfigWatcher(client, "web-proxy",
log.New(os.Stderr, "", log.LstdFlags))
require.NoError(t, err)
cfg := testGetConfigValTimeout(t, w, 500*time.Millisecond)
expectCfg := &Config{
ProxiedServiceName: "web",
ProxiedServiceNamespace: "default",
PublicListener: PublicListenerConfig{
BindAddress: "10.10.10.10",
BindPort: 1010,
LocalServiceAddress: "127.0.0.1:5000",
HandshakeTimeoutMs: 999,
LocalConnectTimeoutMs: 1000, // from applyDefaults
},
Upstreams: []UpstreamConfig{
{
DestinationName: "db",
DestinationNamespace: "default",
DestinationType: "service",
LocalBindPort: 9191,
LocalBindAddress: "127.0.0.1",
},
},
}
assert.Equal(t, expectCfg, cfg)
// Now keep watching and update the config.
go func() {
// Wait for watcher to be watching
time.Sleep(20 * time.Millisecond)
reg.Connect.Proxy.Upstreams = append(reg.Connect.Proxy.Upstreams,
api.Upstream{
DestinationName: "cache",
LocalBindPort: 9292,
LocalBindAddress: "127.10.10.10",
})
reg.Connect.Proxy.Config["local_connect_timeout_ms"] = 444
err := agent.ServiceRegister(reg)
require.NoError(t, err)
}()
cfg = testGetConfigValTimeout(t, w, 2*time.Second)
expectCfg.Upstreams = append(expectCfg.Upstreams, UpstreamConfig{
DestinationName: "cache",
DestinationNamespace: "default",
DestinationType: "service",
LocalBindPort: 9292,
LocalBindAddress: "127.10.10.10",
})
expectCfg.PublicListener.LocalConnectTimeoutMs = 444
assert.Equal(t, expectCfg, cfg)
}
func TestAgentConfigWatcherSidecarProxy(t *testing.T) {
t.Parallel()
@ -186,7 +88,7 @@ func TestAgentConfigWatcherSidecarProxy(t *testing.T) {
client := a.Client()
agent := client.Agent()
// Register a local agent service with a managed proxy
// Register a local agent service with a sidecar proxy
reg := &api.AgentServiceRegistration{
Name: "web",
Port: 8080,

View File

@ -135,7 +135,7 @@ func TestService_ServerTLSConfig(t *testing.T) {
// NewTestAgent setup a CA already by default
// Register a local agent service with a managed proxy
// Register a local agent service
reg := &api.AgentServiceRegistration{
Name: "web",
Port: 8080,

View File

@ -18,14 +18,13 @@ import (
// recursing into any key matching the left hand side. In this case the left
// hand side must use periods to specify a full path e.g.
// `connect.proxy.config`. The path must be the canonical key names (i.e.
// CamelCase) AFTER translation so ExecMode not exec_mode. These are still matched
// CamelCase) AFTER translation so NodeName not node_name. These are still matched
// in a case-insensitive way.
//
// This is needed for example because parts of the Service Definition are
// "opaque" maps of metadata or config passed to another process or component.
// If we allow translation to recurse we might mangle the "opaque" keys given
// where they clash with key names in other parts of the definition (and they do
// in practice with deprecated managed proxy upstreams) :sob:
// where they clash with key names in other parts of the definition :sob:
//
// Example:
// m - TranslateKeys(m, map[string]string{
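
The in-source example is cut off at this hunk boundary. As a rough, self-contained illustration of the rule described above (simplified to per-key opaque matching; the real helper matches full dotted paths such as `connect.proxy.config`), a sketch might look like:

```go
package sketch

import "strings"

// translateKeysSketch renames keys per dict (lowercase alias -> canonical
// name). An empty canonical name marks an opaque subtree, e.g. proxy config,
// whose user-supplied keys must not be rewritten. Illustrative only; not the
// actual helper in this package.
func translateKeysSketch(m map[string]interface{}, dict map[string]string) {
	for k, v := range m {
		canon, ok := dict[strings.ToLower(k)]
		if ok && canon == "" {
			continue // opaque: leave the subtree exactly as written
		}
		if ok && canon != k {
			delete(m, k)
			m[canon] = v
		}
		if child, isMap := v.(map[string]interface{}); isMap {
			translateKeysSketch(child, dict)
		}
	}
}
```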

View File

@ -166,9 +166,6 @@ func defaultServerConfig() *TestServerConfig {
// const TestClusterID causes import cycle so hard code it here.
"cluster_id": "11111111-2222-3333-4444-555555555555",
},
"proxy": map[string]interface{}{
"allow_managed_api_registration": true,
},
},
}
}

View File

@ -217,105 +217,3 @@ $ curl \
- `ValidBefore` `(string)` - The time before which the certificate is valid.
Used with `ValidAfter` this can determine the validity period of the certificate.
## Managed Proxy Configuration ([Deprecated](/docs/connect/proxies/managed-deprecated.html))
This endpoint returns the configuration for a [managed
proxy](/docs/connect/proxies.html). It is only useful for _managed proxies_
and not relevant for unmanaged proxies. This endpoint will be removed in a
future major release as part of [managed proxy
deprecation](/docs/connect/proxies/managed-deprecated.html). The equivalent
API for use with all future proxies is the more generic
[`/v1/agent/service/:service_id`](/api/agent/service.html) endpoint.
Managed proxy configuration is set in the service definition. When Consul
starts the managed proxy, it provides the service ID and ACL token. The proxy
is expected to call this endpoint to retrieve its configuration. It may use
a blocking query to detect any configuration changes.
| Method | Path | Produces |
| ------ | ---------------------------- | -------------------------- |
| `GET` | `/agent/connect/proxy/:id` | `application/json` |
The table below shows this endpoint's support for
[blocking queries](/api/features/blocking.html),
[consistency modes](/api/features/consistency.html),
[agent caching](/api/features/caching.html), and
[required ACLs](/api/index.html#authentication).
| Blocking Queries | Consistency Modes | Agent Caching | ACL Required |
| ---------------- | ----------------- | ------------- | ---------------------------- |
| `YES`<sup>1</sup>| `all` | `none` | `service:write, proxy token` |
<sup>1</sup> Supports [hash-based
blocking](/api/features/blocking.html#hash-based-blocking-queries) only.
### Parameters
- `ID` `(string: <required>)` - The ID (not the name) of the proxy service
in the local agent catalog. For managed proxies, this is provided in the
`CONSUL_PROXY_ID` environment variable by Consul.
### Sample Request
```text
$ curl \
http://127.0.0.1:8500/v1/agent/connect/proxy/web-proxy
```
### Sample Response
```json
{
"ProxyServiceID": "web-proxy",
"TargetServiceID": "web",
"TargetServiceName": "web",
"ContentHash": "cffa5f4635b134b9",
"ExecMode": "daemon",
"Command": [
"/usr/local/bin/consul",
"connect",
"proxy"
],
"Config": {
"bind_address": "127.0.0.1",
"bind_port": 20199,
"local_service_address": "127.0.0.1:8181"
},
"Upstreams": [
{
"DestinationType": "service",
"DestinationName": "db",
"LocalBindPort": 1234,
"Config": {
"connect_timeout_ms": 1000
}
},
{
"DestinationType": "prepared_query",
"DestinationName": "geo-cache",
"LocalBindPort": 1235
}
]
}
```
- `ProxyServiceID` `string` - The ID of the proxy service.
- `TargetServiceID` `(string)` - The ID of the target service the proxy represents.
- `TargetServiceName` `(string)` - The name of the target service the proxy represents.
- `ContentHash` `(string)` - The content hash of the response used for hash-based
blocking queries.
- `ExecMode` `(string)` - The execution mode of the managed proxy.
- `Command` `(array<string>)` - The command for the managed proxy.
- `Config` `(map<string|any>)` - The configuration for the managed proxy. This
is a map of primitive values (including arrays and maps) that is set by the
user.
- `Upstreams` `(array<Upstream>)` - The configured upstreams for the proxy. See
[Upstream Configuration Reference](/docs/connect/registration/service-registration.html#upstream-configuration-reference)
for more details on the format.

View File

@ -308,37 +308,6 @@ curl localhost:8500/v1/agent/health/service/name/web
"Meta": null,
"Port": 80,
"EnableTagOverride": false,
"ProxyDestination": "",
"Connect": {
"Native": false,
"Proxy": null
},
"CreateIndex": 0,
"ModifyIndex": 0
}
],
"passing": [
{
"ID": "web1",
"Service": "web",
"Tags": [
"rails"
],
"Address": "",
"TaggedAddresses": {
"lan": {
"address": "127.0.0.1",
"port": 8000
},
"wan": {
"address": "198.18.0.53",
"port": 80
}
},
"Meta": null,
"Port": 80,
"EnableTagOverride": false,
"ProxyDestination": "",
"Connect": {
"Native": false,
"Proxy": null
@ -390,7 +359,6 @@ curl localhost:8500/v1/agent/health/service/id/web2
"Meta": null,
"Port": 80,
"EnableTagOverride": false,
"ProxyDestination": "",
"Connect": {
"Native": false,
"Proxy": null
@ -438,7 +406,6 @@ curl localhost:8500/v1/agent/health/service/id/web1
"Meta": null,
"Port": 80,
"EnableTagOverride": false,
"ProxyDestination": "",
"Connect": {
"Native": false,
"Proxy": null
@ -523,12 +490,6 @@ service definition keys for compatibility with the config file format.
proxies representing another service or "mesh-gateway" for instances of
a [mesh gateway](/docs/connect/mesh_gateway.html)
- `ProxyDestination` `(string: "")` - **Deprecated** From 1.2.0 to 1.2.3 this
was used for "connect-proxy" `Kind` services, however the equivalent field is
now in `Proxy.DestinationServiceName`. Registrations using this field will
continue to work until some later major version where this will be removed
entirely. It's strongly recommended to switch to using the new field.
- `Proxy` `(Proxy: nil)` - From 1.2.3 on, specifies the configuration for a
Connect proxy instance. This is only valid if `Kind == "connect-proxy"` or
`Kind == "mesh-gateway"`. See the [Proxy documentation](/docs/connect/registration/service-registration.html)

View File

@ -498,7 +498,6 @@ $ curl \
"ServiceTags": [
"tacos"
],
"ServiceProxyDestination": "",
"ServiceProxy": {
"DestinationServiceName": "",
"DestinationServiceID": "",
@ -555,10 +554,6 @@ $ curl \
- `ServiceKind` is the kind of service, usually "". See the Agent
registration API for more information.
- `ServiceProxyDestination` **Deprecated** this field duplicates
`ServiceProxy.DestinationServiceName` for backwards compatibility. It will be
removed in a future major version release.
- `ServiceProxy` is the proxy config as specified in
[Connect Proxies](/docs/connect/proxies.html).

View File

@ -272,7 +272,6 @@ curl -X GET localhost:8500/v1/catalog/service/api-internal
},
"ServicePort": 9090,
"ServiceEnableTagOverride": false,
"ServiceProxyDestination": "",
"ServiceProxy": {},
"ServiceConnect": {},
"CreateIndex": 30,
@ -305,7 +304,6 @@ curl -X GET localhost:8500/v1/catalog/service/api-internal
},
"ServicePort": 9090,
"ServiceEnableTagOverride": false,
"ServiceProxyDestination": "",
"ServiceProxy": {},
"ServiceConnect": {},
"CreateIndex": 29,
@ -335,7 +333,6 @@ curl -X GET localhost:8500/v1/catalog/service/api-internal
},
"ServicePort": 9090,
"ServiceEnableTagOverride": false,
"ServiceProxyDestination": "",
"ServiceProxy": {},
"ServiceConnect": {},
"CreateIndex": 28,
@ -381,7 +378,6 @@ curl -G localhost:8500/v1/catalog/service/api-internal --data-urlencode 'filter=
},
"ServicePort": 9090,
"ServiceEnableTagOverride": false,
"ServiceProxyDestination": "",
"ServiceProxy": {},
"ServiceConnect": {},
"CreateIndex": 29,

View File

@ -941,34 +941,6 @@ default will automatically work with some tooling.
CSR resources this way without artificially slowing down rotations.
Added in 1.4.1.
* <a name="connect_proxy"></a><a href="#connect_proxy">`proxy`</a>
[**Deprecated**](/docs/connect/proxies/managed-deprecated.html) This
object allows setting options for the Connect proxies. The following
sub-keys are available:
* <a name="connect_proxy_allow_managed_registration"></a><a
href="#connect_proxy_allow_managed_registration">`allow_managed_api_registration`</a>
[**Deprecated**](/docs/connect/proxies/managed-deprecated.html)
Allows managed proxies to be configured with services that are
registered via the Agent HTTP API. Enabling this would allow anyone
with permission to register a service to define a command to execute
for the proxy. By default, this is false to protect against
arbitrary process execution.
* <a name="connect_proxy_allow_managed_root"></a><a
href="#connect_proxy_allow_managed_root">`allow_managed_root`</a>
[**Deprecated**](/docs/connect/proxies/managed-deprecated.html)
Allows Consul to start managed proxies if Consul is running as root
(EUID of the process is zero). We recommend running Consul as a
non-root user. By default, this is false to protect against inadvertently
running external processes as root.
* <a name="connect_proxy_defaults"></a><a
href="#connect_proxy_defaults">`proxy_defaults`</a>
[**Deprecated**](/docs/connect/proxies/managed-deprecated.html) This
object configures the default proxy settings for service definitions
with [managed proxies](/docs/connect/proxies/managed-deprecated.html)
(now deprecated). It accepts the fields `exec_mode`, `daemon_command`,
and `config`. These are used as default values for the respective
fields in the service definition.
* <a name="datacenter"></a><a href="#datacenter">`datacenter`</a> Equivalent to the
[`-datacenter` command-line flag](#_datacenter).
@ -1391,8 +1363,6 @@ default will automatically work with some tooling.
to disable. **Note**: this will disable WAN federation which is not recommended. Various catalog and WAN related
endpoints will return errors or empty results. TCP and UDP.
* <a name="server_rpc_port"></a><a href="#server_rpc_port">`server`</a> - Server RPC address. Default 8300. TCP only.
* <a name="proxy_min_port"></a><a href="#proxy_min_port">`proxy_min_port`</a> [**Deprecated**](/docs/connect/proxies/managed-deprecated.html) - Minimum port number to use for automatically assigned [managed proxies](/docs/connect/proxies/managed-deprecated.html). Default 20000.
* <a name="proxy_max_port"></a><a href="#proxy_max_port">`proxy_max_port`</a> [**Deprecated**](/docs/connect/proxies/managed-deprecated.html) - Maximum port number to use for automatically assigned [managed proxies](/docs/connect/proxies/managed-deprecated.html). Default 20255.
* <a name="sidecar_min_port"></a><a
href="#sidecar_min_port">`sidecar_min_port`</a> - Inclusive minimum port
number to use for automatically assigned [sidecar service

View File

@ -1011,8 +1011,7 @@ These metrics give insight into the health of the cluster as a whole.
## Connect Built-in Proxy Metrics
Consul Connect's built-in proxy is by default configured to log metrics to the
same sink as the agent that starts it when running as a [managed
proxy](/docs/connect/proxies.html#managed-proxies).
same sink as the agent that starts it.
When running, the proxy emits some basic metrics. These will be expanded
upon in the future.

View File

@ -20,8 +20,8 @@ To ensure that services only allow external connections established via
the Connect protocol, you should configure all services to only accept connections on a loopback address.
~> **Deprecation Note:** Managed Proxies are a deprecated method for deploying
sidecar proxies, as of Consul 1.3. See [managed proxy
deprecation](/docs/connect/proxies/managed-deprecated.html) for more
sidecar proxies, and have been removed in Consul 1.6. See [managed proxy
deprecation](/docs/connect/proxies/managed-deprecated.html) for more
information. If you are using managed proxies we strongly recommend that you
switch to [sidecar service registrations](/docs/connect/proxies/sidecar-service.html) for registering proxies.

View File

@ -12,10 +12,7 @@ Consul comes with a built-in L4 proxy for testing and development with Consul
Connect.
Below is a complete example of all the configuration options available
for the built-in proxy. Note that only the `service.connect.proxy.config` and
`service.connect.proxy.upstreams[].config` maps are being described here, the
rest of the service definition is shown for context but is [described
elsewhere](/docs/connect/proxies.html#managed-proxies).
for the built-in proxy.
~> **Note:** Although you can configure the built-in proxy using configuration
entries, it doesn't have the L7 capability necessary for the observability

View File

@ -16,18 +16,17 @@ Connect proxies where the proxy process was started, configured, and stopped by
Consul. They were enabled via basic configurations within the service
definition.
-> **Consul 1.3.0 deprecates Managed Proxies completely.** It's _strongly_
recommended you do not build anything using Managed proxies and consider using
!> **Consul 1.6.0 removes Managed Proxies completely.**
This documentation is provided for prior versions only. You may consider using
[sidecar service
registrations](/docs/connect/proxies/sidecar-service.html) instead.
Even though this was a beta feature, managed proxies will continue to work at
least until Consul 1.6 to prevent disruption to demonstration and
proof-of-concept deployments of Consul Connect. Anyone using managed proxies
though should aim to change their workflow as soon as possible to avoid issues
with a later upgrade.
Managed proxies have been deprecated since Consul 1.3 and have been fully removed
in Consul 1.6. Anyone using Managed Proxies should aim to change their workflow
as soon as possible to avoid issues with a later upgrade.
After transitioning away from all managed proxy usage, the `proxy` subdirectory inside [`data_dir`](https://www.consul.io/docs/agent/options.html#_data_dir) (specified in Consul config) can be deleted to remove extraneous configuration files and free up disk space.
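
For operators scripting that cleanup, a hypothetical sketch in Go; the directory layout is as described above, and `dataDir` stands in for whatever the agent's `-data-dir` points at:

```go
package example

import (
	"os"
	"path/filepath"
)

// cleanupManagedProxyState removes the now-unused proxy subdirectory under
// the agent's data_dir once no managed proxies remain.
func cleanupManagedProxyState(dataDir string) error {
	return os.RemoveAll(filepath.Join(dataDir, "proxy"))
}
```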
While the current functionality will remain present for a few major releases,
**new and known issues will not be fixed**.
## Deprecation Rationale

View File

@ -127,17 +127,6 @@ registering a proxy instance.
The following examples show all possible upstream configuration parameters.
Note that in versions 1.2.0 to 1.3.0, managed proxy upstreams were specified
inside the opaque `connect.proxy.config` map. The format is almost unchanged,
however managed proxy upstreams are now defined a level up, in
`connect.proxy.upstreams`. The old location is deprecated and will be
automatically converted into the new one for an interim period before support
is dropped in a future major release. The only difference in format between the
upstream definitions is that the field `destination_datacenter` has been renamed
to `datacenter` to reflect that it's the discovery target and not necessarily
the same as the instance that will be returned in the case of a prepared query
that fails over to another datacenter.
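
For reference, the same upstream expressed with the Go API client's current types, where `Datacenter` carries what `destination_datacenter` used to; the values are illustrative:

```go
package example

import "github.com/hashicorp/consul/api"

// geoCache targets a prepared query; Datacenter names the discovery target,
// not necessarily the datacenter of the instance ultimately returned.
var geoCache = api.Upstream{
	DestinationType: api.UpstreamDestTypePreparedQuery,
	DestinationName: "geo-cache",
	Datacenter:      "dc2",
	LocalBindPort:   1235,
}
```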
Note that `snake_case` is used here as it works in both [config file and API
registrations](/docs/agent/services.html#service-definition-parameter-case).