979e1c9c94
- A new endpoint `/v1/agent/service/:service_id` which is a generic way to look up the service for a single instance. The primary value here is that it: - **supports hash-based blocking** and so - **replaces `/agent/connect/proxy/:proxy_id`** as the mechanism the built-in proxy uses to read its config. - It's not proxy-specific and so works for any service. - It has a temporary shim to call through to the existing endpoint to preserve current managed proxy config defaulting behaviour until that is removed entirely (tested). - The built-in proxy now uses the new endpoint exclusively for its config - The built-in proxy now has a `-sidecar-for` flag that allows the service ID of the _target_ service to be specified, on the condition that there is exactly one "sidecar" proxy (that is, one that has `Proxy.DestinationServiceID` set) for the service registered. - Several fixes for edge cases for SidecarService - A fix for `Alias` checks - when running locally they didn't update their state until some external thing updated the target. If the target service has no checks registered as below, then the alias never made it past critical.
155 lines
4.1 KiB
Go
155 lines
4.1 KiB
Go
package proxy
|
|
|
|
import (
|
|
"crypto/x509"
|
|
"log"
|
|
|
|
"github.com/hashicorp/consul/api"
|
|
"github.com/hashicorp/consul/connect"
|
|
"github.com/hashicorp/consul/lib"
|
|
)
|
|
|
|
// Proxy implements the built-in connect proxy.
type Proxy struct {
	// client is the Consul API client; Serve passes it through to the
	// Service constructor and to each upstream listener.
	client *api.Client

	// cfgWatcher is the source of configuration; Serve blocks on its
	// Watch() channel and (re)configures the proxy on each delivery.
	cfgWatcher ConfigWatcher

	// stopChan is closed by Close to signal shutdown to Serve and to the
	// per-listener goroutines started by startListener.
	stopChan chan struct{}

	// logger receives all proxy log output.
	logger *log.Logger

	// service is the connect Service instance, set by Serve when the
	// first configuration arrives; nil until then (Close guards for nil).
	service *connect.Service
}
|
|
|
|
// New returns a proxy with the given configuration source.
|
|
//
|
|
// The ConfigWatcher can be used to update the configuration of the proxy.
|
|
// Whenever a new configuration is detected, the proxy will reconfigure itself.
|
|
func New(client *api.Client, cw ConfigWatcher, logger *log.Logger) (*Proxy, error) {
|
|
return &Proxy{
|
|
client: client,
|
|
cfgWatcher: cw,
|
|
stopChan: make(chan struct{}),
|
|
logger: logger,
|
|
}, nil
|
|
}
|
|
|
|
// Serve the proxy instance until a fatal error occurs or proxy is closed.
func (p *Proxy) Serve() error {

	// cfg tracks the last applied configuration; nil means we have not yet
	// done the one-time initial setup.
	var cfg *Config

	// failCh is used to stop Serve and return an error from another goroutine we
	// spawn.
	failCh := make(chan error, 1)

	// Watch for config changes (initial setup happens on first "change")
	for {
		select {
		case err := <-failCh:
			// don't log here, we can log with better context at the point where we
			// write the err to the chan
			return err

		case newCfg := <-p.cfgWatcher.Watch():
			p.logger.Printf("[DEBUG] got new config")

			// First config delivery: perform one-time setup (telemetry,
			// service instance, public listener). Subsequent deliveries
			// only (re)process upstreams below.
			if cfg == nil {
				// Initial setup

				// Setup telemetry if configured
				// Telemetry failure is logged but deliberately non-fatal.
				_, err := lib.InitTelemetry(newCfg.Telemetry)
				if err != nil {
					p.logger.Printf("[ERR] proxy telemetry config error: %s", err)
				}

				// Setup Service instance now we know target ID etc
				service, err := newCfg.Service(p.client, p.logger)
				if err != nil {
					return err
				}
				p.service = service

				// Defer identity logging and the public listener until the
				// service has its TLS materials (ReadyWait), without
				// blocking the config loop.
				go func() {
					<-service.ReadyWait()
					p.logger.Printf("[INFO] Proxy loaded config and ready to serve")
					tcfg := service.ServerTLSConfig()
					// NOTE(review): errors from GetCertificate and
					// ParseCertificate are discarded; if either fails the
					// indexing below would panic. Presumably ReadyWait
					// guarantees a valid leaf cert — confirm.
					cert, _ := tcfg.GetCertificate(nil)
					leaf, _ := x509.ParseCertificate(cert.Certificate[0])
					p.logger.Printf("[INFO] TLS Identity: %s", leaf.URIs[0])
					roots, err := connect.CommonNamesFromCertPool(tcfg.RootCAs)
					if err != nil {
						p.logger.Printf("[ERR] Failed to parse root subjects: %s", err)
					} else {
						p.logger.Printf("[INFO] TLS Roots : %v", roots)
					}

					// Only start a listener if we have a port set. This allows
					// the configuration to disable our public listener.
					if newCfg.PublicListener.BindPort != 0 {
						newCfg.PublicListener.applyDefaults()
						l := NewPublicListener(p.service, newCfg.PublicListener, p.logger)
						err = p.startListener("public listener", l)
						if err != nil {
							// This should probably be fatal.
							// failCh is buffered (size 1) so this send
							// cannot block even if Serve already returned.
							p.logger.Printf("[ERR] failed to start public listener: %s", err)
							failCh <- err
						}
					}
				}()
			}

			// TODO(banks) update/remove upstreams properly based on a diff with current. Can
			// store a map of uc.String() to Listener here and then use it to only
			// start one of each and stop/modify if changes occur.
			for _, uc := range newCfg.Upstreams {
				uc.applyDefaults()

				// A valid local bind port is required; skip (don't fail)
				// upstreams that lack one.
				if uc.LocalBindPort < 1 {
					p.logger.Printf("[ERR] upstream %s has no local_bind_port. "+
						"Can't start upstream.", uc.String())
					continue
				}

				l := NewUpstreamListener(p.service, p.client, uc, p.logger)
				err := p.startListener(uc.String(), l)
				if err != nil {
					// Upstream start failure is non-fatal, unlike the
					// public listener above.
					p.logger.Printf("[ERR] failed to start upstream %s: %s", uc.String(),
						err)
				}
			}
			// Remember the applied config so the next delivery skips
			// initial setup.
			cfg = newCfg

		case <-p.stopChan:
			// Close was called; shut down cleanly.
			return nil
		}
	}
}
|
|
|
|
// startPublicListener is run from the internal state machine loop
|
|
func (p *Proxy) startListener(name string, l *Listener) error {
|
|
p.logger.Printf("[INFO] %s starting on %s", name, l.BindAddr())
|
|
go func() {
|
|
err := l.Serve()
|
|
if err != nil {
|
|
p.logger.Printf("[ERR] %s stopped with error: %s", name, err)
|
|
return
|
|
}
|
|
p.logger.Printf("[INFO] %s stopped", name)
|
|
}()
|
|
|
|
go func() {
|
|
<-p.stopChan
|
|
l.Close()
|
|
|
|
}()
|
|
|
|
return nil
|
|
}
|
|
|
|
// Close stops the proxy and terminates all active connections. It must be
|
|
// called only once.
|
|
func (p *Proxy) Close() {
|
|
close(p.stopChan)
|
|
if p.service != nil {
|
|
p.service.Close()
|
|
}
|
|
}
|