package proxycfg

import (
	"context"
	"fmt"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
)
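
// handlerIngressGateway maintains the proxycfg snapshot for an ingress
// gateway. It registers the cache watches the gateway depends on and applies
// their results to the ConfigSnapshot as they stream in.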
type handlerIngressGateway struct {
	handlerState
}
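
// initialize sets up the watches every ingress gateway needs before its
// snapshot is usable: CA roots, the cluster-wide mesh config entry, the
// gateway's own config entry, and the services bound to the gateway.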
func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot, error) {
	snap := newConfigSnapshotFromServiceInstance(s.serviceInstance, s.stateConfig)

	// Watch for root changes
	err := s.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
		Datacenter:   s.source.Datacenter,
		QueryOptions: structs.QueryOptions{Token: s.token},
		Source:       *s.source,
	}, rootsWatchID, s.ch)
	if err != nil {
		return snap, err
	}
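
	// The same Notify pattern repeats below: each call registers a blocking
	// query against the agent cache, and results arrive on s.ch tagged with
	// the correlation ID given here so handleUpdate can route them.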

	// Get information about the entire service mesh.
	err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{
		Kind:           structs.MeshConfig,
		Name:           structs.MeshConfigMesh,
		Datacenter:     s.source.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: s.token},
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()),
	}, meshConfigEntryID, s.ch)
	if err != nil {
		return snap, err
	}

	// Watch this ingress gateway's config entry
	err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{
		Kind:           structs.IngressGateway,
		Name:           s.service,
		Datacenter:     s.source.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: s.token},
		EnterpriseMeta: s.proxyID.EnterpriseMeta,
	}, gatewayConfigWatchID, s.ch)
	if err != nil {
		return snap, err
	}

	// Watch the ingress-gateway's list of upstreams
	err = s.cache.Notify(ctx, cachetype.GatewayServicesName, &structs.ServiceSpecificRequest{
		Datacenter:     s.source.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: s.token},
		ServiceName:    s.service,
		EnterpriseMeta: s.proxyID.EnterpriseMeta,
	}, gatewayServicesWatchID, s.ch)
	if err != nil {
		return snap, err
	}
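
	// Pre-create the maps that hold per-upstream watch state; handleUpdate
	// and the shared upstream handler fill these in as discovery chains and
	// endpoints resolve.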
	snap.IngressGateway.WatchedDiscoveryChains = make(map[UpstreamID]context.CancelFunc)
	snap.IngressGateway.DiscoveryChain = make(map[UpstreamID]*structs.CompiledDiscoveryChain)
	snap.IngressGateway.WatchedUpstreams = make(map[UpstreamID]map[string]context.CancelFunc)
	snap.IngressGateway.WatchedUpstreamEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
	snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
	snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
	snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener)

	return snap, nil
}
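
// handleUpdate applies a single watch result to the snapshot, dispatching on
// the correlation ID the watch was registered with in initialize. Updates
// with unrecognized IDs fall through to the shared upstream handler.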
func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
	if u.Err != nil {
		return fmt.Errorf("error filling agent cache: %v", u.Err)
	}

	switch {
	case u.CorrelationID == rootsWatchID:
		roots, ok := u.Result.(*structs.IndexedCARoots)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		snap.Roots = roots
	case u.CorrelationID == gatewayConfigWatchID:
		resp, ok := u.Result.(*structs.ConfigEntryResponse)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		gatewayConf, ok := resp.Entry.(*structs.IngressGatewayConfigEntry)
		if !ok {
			return fmt.Errorf("invalid type for config entry: %T", resp.Entry)
		}

		snap.IngressGateway.GatewayConfigLoaded = true
		snap.IngressGateway.TLSConfig = gatewayConf.TLS

		// Load each listener's config from the config entry so we don't have
		// to pass listener config through "upstreams" types as that grows.
		for _, l := range gatewayConf.Listeners {
			key := IngressListenerKeyFromListener(l)
			snap.IngressGateway.Listeners[key] = l
		}
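
		// The TLS and listener settings affect which DNS SANs the gateway's
		// leaf certificate needs, so re-evaluate the leaf watch now.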
		if err := s.watchIngressLeafCert(ctx, snap); err != nil {
			return err
		}

	case u.CorrelationID == gatewayServicesWatchID:
		services, ok := u.Result.(*structs.IndexedGatewayServices)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}

		// Update our upstreams and watches.
		var hosts []string
		watchedSvcs := make(map[UpstreamID]struct{})
		upstreamsMap := make(map[IngressListenerKey]structs.Upstreams)
		for _, service := range services.Services {
			u := makeUpstream(service)

			uid := NewUpstreamID(&u)

			watchOpts := discoveryChainWatchOpts{
				id:         uid,
				name:       u.DestinationName,
				namespace:  u.DestinationNamespace,
				partition:  u.DestinationPartition,
				datacenter: s.source.Datacenter,
			}
			up := &handlerUpstreams{handlerState: s.handlerState}
			err := up.watchDiscoveryChain(ctx, snap, watchOpts)
			if err != nil {
				return fmt.Errorf("failed to watch discovery chain for %s: %v", uid, err)
			}
			watchedSvcs[uid] = struct{}{}

			hosts = append(hosts, service.Hosts...)

			id := IngressListenerKeyFromGWService(*service)
			upstreamsMap[id] = append(upstreamsMap[id], u)
		}

		snap.IngressGateway.Upstreams = upstreamsMap
		snap.IngressGateway.UpstreamsSet = watchedSvcs
		snap.IngressGateway.Hosts = hosts
		snap.IngressGateway.HostsSet = true
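
		// Stop watching discovery chains for services that are no longer
		// bound to this gateway.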
		for uid, cancelFn := range snap.IngressGateway.WatchedDiscoveryChains {
			if _, ok := watchedSvcs[uid]; !ok {
				cancelFn()
				delete(snap.IngressGateway.WatchedDiscoveryChains, uid)
			}
		}

		if err := s.watchIngressLeafCert(ctx, snap); err != nil {
			return err
		}

	default:
		return (*handlerUpstreams)(s).handleUpdateUpstreams(ctx, u, snap)
	}

	return nil
}

// Note: Ingress gateways are always bound to ports and never unix sockets.
// This means LocalBindPort is the only possibility.
func makeUpstream(g *structs.GatewayService) structs.Upstream {
	upstream := structs.Upstream{
		DestinationName:      g.Service.Name,
		DestinationNamespace: g.Service.NamespaceOrDefault(),
		DestinationPartition: g.Service.PartitionOrDefault(),
		LocalBindPort:        g.Port,
		IngressHosts:         g.Hosts,
		// Pass the protocol that was configured on the ingress listener in
		// order to force that protocol on the Envoy listener.
		Config: map[string]interface{}{
			"protocol": g.Protocol,
		},
	}

	return upstream
}
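
// watchIngressLeafCert (re)establishes the watch on the gateway's leaf
// certificate. It is a no-op until both the gateway config entry and the
// bound hosts have been received, since the certificate's DNS SANs depend
// on both.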
func (s *handlerIngressGateway) watchIngressLeafCert(ctx context.Context, snap *ConfigSnapshot) error {
	// Note that we DON'T test for TLS.Enabled because we need a leaf cert for
	// the gateway even without TLS to use as a client cert.
	if !snap.IngressGateway.GatewayConfigLoaded || !snap.IngressGateway.HostsSet {
		return nil
	}

	// Watch the leaf cert
	if snap.IngressGateway.LeafCertWatchCancel != nil {
		snap.IngressGateway.LeafCertWatchCancel()
	}
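
	// Register the new watch under a child context so it can be cancelled
	// independently the next time the SAN list changes.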
	ctx, cancel := context.WithCancel(ctx)
	err := s.cache.Notify(ctx, cachetype.ConnectCALeafName, &cachetype.ConnectCALeafRequest{
		Datacenter:     s.source.Datacenter,
		Token:          s.token,
		Service:        s.service,
		DNSSAN:         s.generateIngressDNSSANs(snap),
		EnterpriseMeta: s.proxyID.EnterpriseMeta,
	}, leafWatchID, s.ch)
	if err != nil {
		cancel()
		return err
	}
	snap.IngressGateway.LeafCertWatchCancel = cancel

	return nil
}

// connectTLSServingEnabled returns true if Connect TLS is enabled at either
// the gateway level or for at least one of the specific listeners.
func connectTLSServingEnabled(snap *ConfigSnapshot) bool {
	if snap.IngressGateway.TLSConfig.Enabled {
		return true
	}

	for _, l := range snap.IngressGateway.Listeners {
		if l.TLS != nil && l.TLS.Enabled {
			return true
		}
	}
	return false
}
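
// generateIngressDNSSANs computes the DNS names the gateway's leaf
// certificate must cover: wildcard ingress names for each namespace in both
// the primary and alternate DNS domains, plus any custom hosts configured on
// the gateway's services.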
func (s *handlerIngressGateway) generateIngressDNSSANs(snap *ConfigSnapshot) []string {
	// Update our leaf cert watch with wildcard entries for our DNS domains as
	// well as any configured custom hostnames from the service. Note that in
	// the case that only a subset of listeners are TLS-enabled, we still load
	// DNS SANs for all upstreams. We could limit it to only those reachable
	// from the enabled listeners, but that adds a lot of complication and
	// they are already wildcards anyway. It's simpler to have one certificate
	// for the whole proxy that works for any possible upstream we might need
	// than to try to be more selective when we are already using wildcard DNS
	// names.
	if !connectTLSServingEnabled(snap) {
		return nil
	}

	var dnsNames []string
	namespaces := make(map[string]struct{})
	for _, upstreams := range snap.IngressGateway.Upstreams {
		for _, u := range upstreams {
			namespaces[u.DestinationNamespace] = struct{}{}
		}
	}

	// TODO(partitions): How should these be updated for partitions?
	for ns := range namespaces {
		// The default namespace is special cased in DNS resolution, so
		// special case it here.
		if ns == structs.IntentionDefaultNamespace {
			ns = ""
		} else {
			ns = ns + "."
		}
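
		// For example, assuming DNS domain "consul" and datacenter "dc1":
		// the default namespace yields *.ingress.consul and
		// *.ingress.dc1.consul, while namespace "foo" yields
		// *.ingress.foo.consul and *.ingress.foo.dc1.consul.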
		dnsNames = append(dnsNames, fmt.Sprintf("*.ingress.%s%s", ns, s.dnsConfig.Domain))
		dnsNames = append(dnsNames, fmt.Sprintf("*.ingress.%s%s.%s", ns, s.source.Datacenter, s.dnsConfig.Domain))
		if s.dnsConfig.AltDomain != "" {
			dnsNames = append(dnsNames, fmt.Sprintf("*.ingress.%s%s", ns, s.dnsConfig.AltDomain))
			dnsNames = append(dnsNames, fmt.Sprintf("*.ingress.%s%s.%s", ns, s.source.Datacenter, s.dnsConfig.AltDomain))
		}
	}

	dnsNames = append(dnsNames, snap.IngressGateway.Hosts...)

	return dnsNames
}