package xds

import (
	"errors"
	"fmt"
	"net"
	"net/url"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
	envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	envoy_grpc_stats_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_stats/v3"
	envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	envoy_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
	envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
	envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/any"
	"github.com/golang/protobuf/ptypes/wrappers"

	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/connect/ca"
	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/sdk/iptables"
)
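
// virtualIPTag is the name of the tagged address where consul-k8s stores a
// service's virtual (cluster) IP.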
const virtualIPTag = "virtual"

// listenersFromSnapshot returns the xDS API representation of the "listeners" in the snapshot.
func (s *ResourceGenerator) listenersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	if cfgSnap == nil {
		return nil, errors.New("nil config given")
	}

	switch cfgSnap.Kind {
	case structs.ServiceKindConnectProxy:
		return s.listenersFromSnapshotConnectProxy(cfgSnap)
	case structs.ServiceKindTerminatingGateway:
		return s.listenersFromSnapshotGateway(cfgSnap)
	case structs.ServiceKindMeshGateway:
		return s.listenersFromSnapshotGateway(cfgSnap)
	case structs.ServiceKindIngressGateway:
		return s.listenersFromSnapshotGateway(cfgSnap)
	default:
		return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
	}
}

// listenersFromSnapshotConnectProxy returns the "listeners" for a connect proxy service
func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	resources := make([]proto.Message, 1)
	var err error

	// Configure inbound listener.
	resources[0], err = s.makeInboundListener(cfgSnap, PublicListenerName)
	if err != nil {
		return nil, err
	}

	// This outboundListener is exclusively used when transparent proxy mode is active.
	// In that situation there is a single listener where we are redirecting outbound traffic,
	// and each upstream gets a filter chain attached to that listener.
	var outboundListener *envoy_listener_v3.Listener

	if cfgSnap.Proxy.Mode == structs.ProxyModeTransparent {
		port := iptables.DefaultTProxyOutboundPort
		if cfgSnap.Proxy.TransparentProxy.OutboundListenerPort != 0 {
			port = cfgSnap.Proxy.TransparentProxy.OutboundListenerPort
		}

		outboundListener = makePortListener(OutboundListenerName, "127.0.0.1", port, envoy_core_v3.TrafficDirection_OUTBOUND)
		outboundListener.FilterChains = make([]*envoy_listener_v3.FilterChain, 0)
		outboundListener.ListenerFilters = []*envoy_listener_v3.ListenerFilter{
			{
				// The original_dst filter is a listener filter that recovers the original destination
				// address before the iptables redirection. This filter is needed for transparent
				// proxies because they route to upstreams using filter chains that match on the
				// destination IP address. If the filter is not present, no chain will match.
				//
				// TODO(tproxy): Hard-coded until we upgrade the go-control-plane library
				Name: "envoy.filters.listener.original_dst",
			},
		}
	}

	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]

		explicit := upstreamCfg.HasLocalPortOrSocket()
		if _, implicit := cfgSnap.ConnectProxy.IntentionUpstreams[uid]; !implicit && !explicit {
			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
			continue
		}

		cfg := s.getAndModifyUpstreamConfigForListener(uid, upstreamCfg, chain)

		// If escape hatch is present, create a listener from it and move on to the next
		if cfg.EnvoyListenerJSON != "" {
			upstreamListener, err := makeListenerFromUserConfig(cfg.EnvoyListenerJSON)
			if err != nil {
				return nil, err
			}
			resources = append(resources, upstreamListener)
			continue
		}

		// RDS, Envoy's Route Discovery Service, is only used for HTTP services with a customized discovery chain.
		useRDS := chain.Protocol != "tcp" && !chain.IsDefault()

		var clusterName string
		if !useRDS {
			// When not using RDS we must generate a cluster name to attach to the filter chain.
			// With RDS, cluster names get attached to the dynamic routes instead.
			target, err := simpleChainTarget(chain)
			if err != nil {
				return nil, err
			}
			clusterName = CustomizeClusterName(target.Name, chain)
		}

		filterName := fmt.Sprintf("%s.%s.%s.%s", chain.ServiceName, chain.Namespace, chain.Partition, chain.Datacenter)

		// Generate the upstream listeners for when they are explicitly set with a local bind port or socket path
		if upstreamCfg != nil && upstreamCfg.HasLocalPortOrSocket() {
			filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{
				routeName:   uid.EnvoyID(),
				clusterName: clusterName,
				filterName:  filterName,
				protocol:    cfg.Protocol,
				useRDS:      useRDS,
			})
			if err != nil {
				return nil, err
			}

			upstreamListener := makeListener(uid.EnvoyID(), upstreamCfg, envoy_core_v3.TrafficDirection_OUTBOUND)
			upstreamListener.FilterChains = []*envoy_listener_v3.FilterChain{
				filterChain,
			}
			resources = append(resources, upstreamListener)

			// Avoid creating filter chains below for upstreams that have dedicated listeners
			continue
		}

		// The rest of this loop is used exclusively for transparent proxies.
		// Below we create a filter chain per upstream, rather than a listener per upstream
		// as we do for explicit upstreams above.

		filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{
			routeName:   uid.EnvoyID(),
			clusterName: clusterName,
			filterName:  filterName,
			protocol:    cfg.Protocol,
			useRDS:      useRDS,
		})
		if err != nil {
			return nil, err
		}

		endpoints := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid][chain.ID()]
		uniqueAddrs := make(map[string]struct{})

		// Match on the virtual IP for the upstream service (identified by the chain's ID).
		// We do not match on all endpoints here since it would lead to load balancing across
		// all instances when any instance address is dialed.
		for _, e := range endpoints {
			if e.Service.Kind == structs.ServiceKind(structs.TerminatingGateway) {
				key := structs.ServiceGatewayVirtualIPTag(chain.CompoundServiceName())

				if vip := e.Service.TaggedAddresses[key]; vip.Address != "" {
					uniqueAddrs[vip.Address] = struct{}{}
				}

				continue
			}
			if vip := e.Service.TaggedAddresses[structs.TaggedAddressVirtualIP]; vip.Address != "" {
				uniqueAddrs[vip.Address] = struct{}{}
			}

			// The virtualIPTag is used by consul-k8s to store the ClusterIP for a service.
			// We only match on this virtual IP if the upstream is in the proxy's partition.
			// This is because the IP is not guaranteed to be unique across k8s clusters.
			if structs.EqualPartitions(e.Node.PartitionOrDefault(), cfgSnap.ProxyID.PartitionOrDefault()) {
				if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" {
					uniqueAddrs[vip.Address] = struct{}{}
				}
			}
		}
		if len(uniqueAddrs) > 2 {
			s.Logger.Debug("detected multiple virtual IPs for an upstream, all will be used to match traffic",
				"upstream", uid, "ip_count", len(uniqueAddrs))
		}

		// For every potential address we collected, create the appropriate address prefix to match on.
		// In this case we are matching on exact addresses, so the prefix is the address itself,
		// and the prefix length is based on whether it's IPv4 or IPv6.
		filterChain.FilterChainMatch = makeFilterChainMatchFromAddrs(uniqueAddrs)

		// Only attach the filter chain if there are addresses to match on
		if filterChain.FilterChainMatch != nil && len(filterChain.FilterChainMatch.PrefixRanges) > 0 {
			outboundListener.FilterChains = append(outboundListener.FilterChains, filterChain)
		}
	}

	if outboundListener != nil {
		// Add a passthrough for every mesh endpoint that can be dialed directly,
		// as opposed to via a virtual IP.
		var passthroughChains []*envoy_listener_v3.FilterChain

		for uid, passthrough := range cfgSnap.ConnectProxy.PassthroughUpstreams {
			u := structs.Upstream{
				DestinationName:      uid.Name,
				DestinationNamespace: uid.NamespaceOrDefault(),
				DestinationPartition: uid.PartitionOrDefault(),
			}

			filterName := fmt.Sprintf("%s.%s.%s.%s", u.DestinationName, u.DestinationNamespace, u.DestinationPartition, cfgSnap.Datacenter)

			filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{
				clusterName: "passthrough~" + passthrough.SNI,
				filterName:  filterName,
				protocol:    "tcp",
			})
			if err != nil {
				return nil, err
			}
			filterChain.FilterChainMatch = makeFilterChainMatchFromAddrs(passthrough.Addrs)

			passthroughChains = append(passthroughChains, filterChain)
		}

		outboundListener.FilterChains = append(outboundListener.FilterChains, passthroughChains...)

		// Filter chains are stable sorted to avoid draining if the list is provided out of order
		sort.SliceStable(outboundListener.FilterChains, func(i, j int) bool {
			return outboundListener.FilterChains[i].FilterChainMatch.PrefixRanges[0].AddressPrefix <
				outboundListener.FilterChains[j].FilterChainMatch.PrefixRanges[0].AddressPrefix
		})

		// Add a catch-all filter chain that acts as a TCP proxy to destinations outside the mesh
		if cfgSnap.ConnectProxy.MeshConfig == nil ||
			!cfgSnap.ConnectProxy.MeshConfig.TransparentProxy.MeshDestinationsOnly {
			filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{
				clusterName: OriginalDestinationClusterName,
				filterName:  OriginalDestinationClusterName,
				protocol:    "tcp",
			})
			if err != nil {
				return nil, err
			}
			outboundListener.FilterChains = append(outboundListener.FilterChains, filterChain)
		}

		// Only add the outbound listener if configured.
		if len(outboundListener.FilterChains) > 0 {
			resources = append(resources, outboundListener)
		}
	}

	// Looping over explicit upstreams is only needed for prepared queries because they do not have discovery chains
	for uid, u := range cfgSnap.ConnectProxy.UpstreamConfig {
		if u.DestinationType != structs.UpstreamDestTypePreparedQuery {
			continue
		}

		cfg, err := structs.ParseUpstreamConfig(u.Config)
		if err != nil {
			// Don't hard fail on a config typo, just warn. The parse func returns
			// default config if there is an error so it's safe to continue.
			s.Logger.Warn("failed to parse", "upstream", uid, "error", err)
		}

		// If escape hatch is present, create a listener from it and move on to the next
		if cfg.EnvoyListenerJSON != "" {
			upstreamListener, err := makeListenerFromUserConfig(cfg.EnvoyListenerJSON)
			if err != nil {
				s.Logger.Error("failed to parse envoy_listener_json",
					"upstream", uid,
					"error", err)
				continue
			}
			resources = append(resources, upstreamListener)
			continue
		}

		upstreamListener := makeListener(uid.EnvoyID(), u, envoy_core_v3.TrafficDirection_OUTBOUND)

		filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{
			// TODO (SNI partition) add partition for upstream SNI
			clusterName: connect.UpstreamSNI(u, "", cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain),
			filterName:  uid.EnvoyID(),
			routeName:   uid.EnvoyID(),
			protocol:    cfg.Protocol,
		})
		if err != nil {
			return nil, err
		}
		upstreamListener.FilterChains = []*envoy_listener_v3.FilterChain{
			filterChain,
		}
		resources = append(resources, upstreamListener)
	}

	cfgSnap.Proxy.Expose.Finalize()
	paths := cfgSnap.Proxy.Expose.Paths

	// Add service health checks to the list of paths to create listeners for if needed
	if cfgSnap.Proxy.Expose.Checks {
		psid := structs.NewServiceID(cfgSnap.Proxy.DestinationServiceID, &cfgSnap.ProxyID.EnterpriseMeta)
		for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(psid) {
			p, err := parseCheckPath(check)
			if err != nil {
				s.Logger.Warn("failed to create listener for", "check", check.CheckID, "error", err)
				continue
			}
			paths = append(paths, p)
		}
	}

	// Configure additional listener for exposed check paths
	for _, path := range paths {
		clusterName := LocalAppClusterName
		if path.LocalPathPort != cfgSnap.Proxy.LocalServicePort {
			clusterName = makeExposeClusterName(path.LocalPathPort)
		}

		l, err := s.makeExposedCheckListener(cfgSnap, clusterName, path)
		if err != nil {
			return nil, err
		}
		resources = append(resources, l)
	}

	return resources, nil
}
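
// makeFilterChainMatchFromAddrs builds a FilterChainMatch whose prefix ranges
// match each of the given addresses exactly (/32 for IPv4, /128 for IPv6).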
func makeFilterChainMatchFromAddrs(addrs map[string]struct{}) *envoy_listener_v3.FilterChainMatch {
	ranges := make([]*envoy_core_v3.CidrRange, 0)

	for addr := range addrs {
		ip := net.ParseIP(addr)
		if ip == nil {
			continue
		}

		pfxLen := uint32(32)
		if ip.To4() == nil {
			pfxLen = 128
		}
		ranges = append(ranges, &envoy_core_v3.CidrRange{
			AddressPrefix: addr,
			PrefixLen:     &wrappers.UInt32Value{Value: pfxLen},
		})
	}

	// The match rules are stable sorted to avoid draining if the list is provided out of order
	sort.SliceStable(ranges, func(i, j int) bool {
		return ranges[i].AddressPrefix < ranges[j].AddressPrefix
	})

	return &envoy_listener_v3.FilterChainMatch{
		PrefixRanges: ranges,
	}
}
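
// parseCheckPath builds an ExposePath from an HTTP or gRPC check definition:
// the local path and port come from the original check target, and the
// listener port comes from the proxied target.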
func parseCheckPath(check structs.CheckType) (structs.ExposePath, error) {
	var path structs.ExposePath

	if check.HTTP != "" {
		path.Protocol = "http"

		// Get path and local port from original HTTP target
		u, err := url.Parse(check.HTTP)
		if err != nil {
			return path, fmt.Errorf("failed to parse url '%s': %v", check.HTTP, err)
		}
		path.Path = u.Path

		_, portStr, err := net.SplitHostPort(u.Host)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.HTTP, err)
		}
		path.LocalPathPort, err = strconv.Atoi(portStr)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.HTTP, err)
		}

		// Get listener port from proxied HTTP target
		u, err = url.Parse(check.ProxyHTTP)
		if err != nil {
			return path, fmt.Errorf("failed to parse url '%s': %v", check.ProxyHTTP, err)
		}

		_, portStr, err = net.SplitHostPort(u.Host)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.ProxyHTTP, err)
		}
		path.ListenerPort, err = strconv.Atoi(portStr)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.ProxyHTTP, err)
		}
	}

	if check.GRPC != "" {
		path.Path = "/grpc.health.v1.Health/Check"
		path.Protocol = "http2"

		// Get local port from original GRPC target of the form: host/service
		proxyServerAndService := strings.SplitN(check.GRPC, "/", 2)
		_, portStr, err := net.SplitHostPort(proxyServerAndService[0])
		if err != nil {
			return path, fmt.Errorf("failed to split host/port from '%s': %v", check.GRPC, err)
		}
		path.LocalPathPort, err = strconv.Atoi(portStr)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.GRPC, err)
		}

		// Get listener port from proxied GRPC target of the form: host/service
		proxyServerAndService = strings.SplitN(check.ProxyGRPC, "/", 2)
		_, portStr, err = net.SplitHostPort(proxyServerAndService[0])
		if err != nil {
			return path, fmt.Errorf("failed to split host/port from '%s': %v", check.ProxyGRPC, err)
		}
		path.ListenerPort, err = strconv.Atoi(portStr)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.ProxyGRPC, err)
		}
	}

	path.ParsedFromCheck = true

	return path, nil
}

// listenersFromSnapshotGateway returns the "listener" for a terminating-gateway or mesh-gateway service
func (s *ResourceGenerator) listenersFromSnapshotGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	cfg, err := ParseGatewayConfig(cfgSnap.Proxy.Config)
	if err != nil {
		// Don't hard fail on a config typo, just warn. The parse func returns
		// default config if there is an error so it's safe to continue.
		s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
	}

	// We'll collect all of the desired listeners first, and deduplicate them later.
	type namedAddress struct {
		name string
		structs.ServiceAddress
	}
	addrs := make([]namedAddress, 0)

	var resources []proto.Message
	if !cfg.NoDefaultBind {
		addr := cfgSnap.Address
		if addr == "" {
			addr = "0.0.0.0"
		}

		a := structs.ServiceAddress{
			Address: addr,
			Port:    cfgSnap.Port,
		}
		addrs = append(addrs, namedAddress{name: "default", ServiceAddress: a})
	}

	if cfg.BindTaggedAddresses {
		for name, addrCfg := range cfgSnap.TaggedAddresses {
			a := structs.ServiceAddress{
				Address: addrCfg.Address,
				Port:    addrCfg.Port,
			}
			addrs = append(addrs, namedAddress{name: name, ServiceAddress: a})
		}
	}

	for name, addrCfg := range cfg.BindAddresses {
		a := structs.ServiceAddress{
			Address: addrCfg.Address,
			Port:    addrCfg.Port,
		}
		addrs = append(addrs, namedAddress{name: name, ServiceAddress: a})
	}

	// Prevent invalid configurations of binding to the same port/addr twice
	// including with the any addresses
	//
	// Sort the list and then if two items share a service address, take the
	// first one to ensure we generate one listener per address and it's
	// stable.
	sort.Slice(addrs, func(i, j int) bool {
		return addrs[i].name < addrs[j].name
	})

	// Make listeners and deduplicate on the fly.
	seen := make(map[structs.ServiceAddress]bool)
	for _, a := range addrs {
		if seen[a.ServiceAddress] {
			continue
		}
		seen[a.ServiceAddress] = true

		var l *envoy_listener_v3.Listener

		switch cfgSnap.Kind {
		case structs.ServiceKindTerminatingGateway:
			l, err = s.makeTerminatingGatewayListener(cfgSnap, a.name, a.Address, a.Port)
			if err != nil {
				return nil, err
			}
		case structs.ServiceKindIngressGateway:
			listeners, err := s.makeIngressGatewayListeners(a.Address, cfgSnap)
			if err != nil {
				return nil, err
			}
			resources = append(resources, listeners...)
		case structs.ServiceKindMeshGateway:
			l, err = s.makeMeshGatewayListener(a.name, a.Address, a.Port, cfgSnap)
			if err != nil {
				return nil, err
			}
		}
		if l != nil {
			resources = append(resources, l)
		}
	}
	return resources, err
}

// makeListener returns a listener with name and bind details set. Filters must
// be added before it's useful.
//
// Note on names: Envoy listeners attempt graceful transitions of connections
// when their config changes but that means they can't have their bind address
// or port changed in a running instance. Since our users might choose to change
// a bind address or port for the public or upstream listeners, we need to
// encode those into the unique name for the listener such that if the user
// changes them, we actually create a whole new listener on the new address and
// port. Envoy should take care of closing the old one once it sees it's no
// longer in the config.
func makeListener(name string, upstream *structs.Upstream, trafficDirection envoy_core_v3.TrafficDirection) *envoy_listener_v3.Listener {
	if upstream.LocalBindPort == 0 && upstream.LocalBindSocketPath != "" {
		return makePipeListener(name, upstream.LocalBindSocketPath, upstream.LocalBindSocketMode, trafficDirection)
	}

	return makePortListenerWithDefault(name, upstream.LocalBindAddress, upstream.LocalBindPort, trafficDirection)
}
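
// makePortListener returns a TCP listener bound to the given address and port.
// The bind details are encoded into the listener name so that changing them
// produces a brand new listener rather than an in-place update.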
func makePortListener(name, addr string, port int, trafficDirection envoy_core_v3.TrafficDirection) *envoy_listener_v3.Listener {
	return &envoy_listener_v3.Listener{
		Name:             fmt.Sprintf("%s:%s:%d", name, addr, port),
		Address:          makeAddress(addr, port),
		TrafficDirection: trafficDirection,
	}
}
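
// makePortListenerWithDefault is like makePortListener but falls back to
// binding on 127.0.0.1 when no address is given.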
func makePortListenerWithDefault(name, addr string, port int, trafficDirection envoy_core_v3.TrafficDirection) *envoy_listener_v3.Listener {
	if addr == "" {
		addr = "127.0.0.1"
	}
	return makePortListener(name, addr, port, trafficDirection)
}
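
// makePipeListener returns a listener bound to a unix domain socket at the
// given path, created with the given file mode.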
func makePipeListener(name, path string, mode_str string, trafficDirection envoy_core_v3.TrafficDirection) *envoy_listener_v3.Listener {
	// We've already validated this, so it should not fail.
	mode, err := strconv.ParseUint(mode_str, 0, 32)
	if err != nil {
		mode = 0
	}
	return &envoy_listener_v3.Listener{
		Name:             fmt.Sprintf("%s:%s", name, path),
		Address:          makePipeAddress(path, uint32(mode)),
		TrafficDirection: trafficDirection,
	}
}

// makeListenerFromUserConfig returns the listener config decoded from an
// arbitrary proto3 json format string or an error if it's invalid.
//
// For now we only support embedding in JSON strings because of the hcl parsing
// pain (see Background section in the comment for decode.HookWeakDecodeFromSlice).
// This may be fixed in decode.HookWeakDecodeFromSlice in the future.
//
// When we do that we can support just nesting the config directly into the
// JSON/hcl naturally but this is a stop-gap that gets us an escape hatch
// immediately. It's also probably not a bad thing to support long-term since
// any config generated by other systems will likely be in canonical protobuf
// form rather than our slight variant in JSON/hcl.
func makeListenerFromUserConfig(configJSON string) (*envoy_listener_v3.Listener, error) {
	// Type field is present so decode it as an any.Any
	var any any.Any
	if err := jsonpb.UnmarshalString(configJSON, &any); err != nil {
		return nil, err
	}
	var l envoy_listener_v3.Listener
	if err := proto.Unmarshal(any.Value, &l); err != nil {
		return nil, err
	}
	return &l, nil
}

// Ensure that the first filter in each filter chain of a public listener is
// the authz filter to prevent unauthorized access.
func (s *ResourceGenerator) injectConnectFilters(cfgSnap *proxycfg.ConfigSnapshot, listener *envoy_listener_v3.Listener) error {
	authzFilter, err := makeRBACNetworkFilter(
		cfgSnap.ConnectProxy.Intentions,
		cfgSnap.IntentionDefaultAllow,
	)
	if err != nil {
		return err
	}

	for idx := range listener.FilterChains {
		// Insert our authz filter before any others
		listener.FilterChains[idx].Filters =
			append([]*envoy_listener_v3.Filter{
				authzFilter,
			}, listener.FilterChains[idx].Filters...)
	}
	return nil
}

const (
	httpConnectionManagerOldName = "envoy.http_connection_manager"
	httpConnectionManagerNewName = "envoy.filters.network.http_connection_manager"
)
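
// extractRdsResourceNames returns the names of the RDS route configurations
// referenced by any http_connection_manager filters on the listener.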
func extractRdsResourceNames(listener *envoy_listener_v3.Listener) ([]string, error) {
	var found []string

	for chainIdx, chain := range listener.FilterChains {
		for filterIdx, filter := range chain.Filters {
			if filter.Name != httpConnectionManagerNewName {
				continue
			}

			tc, ok := filter.ConfigType.(*envoy_listener_v3.Filter_TypedConfig)
			if !ok {
				return nil, fmt.Errorf(
					"filter chain %d has a %q filter %d with an unsupported config type: %T",
					chainIdx,
					filter.Name,
					filterIdx,
					filter.ConfigType,
				)
			}

			var hcm envoy_http_v3.HttpConnectionManager
			if err := ptypes.UnmarshalAny(tc.TypedConfig, &hcm); err != nil {
				return nil, err
			}

			if hcm.RouteSpecifier == nil {
				continue
			}

			rds, ok := hcm.RouteSpecifier.(*envoy_http_v3.HttpConnectionManager_Rds)
			if !ok {
				continue
			}

			if rds.Rds == nil {
				continue
			}

			found = append(found, rds.Rds.RouteConfigName)
		}
	}

	return found, nil
}

// Locate the existing http connection manager L4 filter and inject our RBAC filter at the top.
func injectHTTPFilterOnFilterChains(
	listener *envoy_listener_v3.Listener,
	authzFilter *envoy_http_v3.HttpFilter,
) error {
	for chainIdx, chain := range listener.FilterChains {
		var (
			hcmFilter    *envoy_listener_v3.Filter
			hcmFilterIdx int
		)

		for filterIdx, filter := range chain.Filters {
			if filter.Name == httpConnectionManagerOldName ||
				filter.Name == httpConnectionManagerNewName {
				hcmFilter = filter
				hcmFilterIdx = filterIdx
				break
			}
		}
		if hcmFilter == nil {
			return fmt.Errorf(
				"filter chain %d lacks either a %q or %q filter",
				chainIdx,
				httpConnectionManagerOldName,
				httpConnectionManagerNewName,
			)
		}

		var hcm envoy_http_v3.HttpConnectionManager
		tc, ok := hcmFilter.ConfigType.(*envoy_listener_v3.Filter_TypedConfig)
		if !ok {
			return fmt.Errorf(
				"filter chain %d has a %q filter with an unsupported config type: %T",
				chainIdx,
				hcmFilter.Name,
				hcmFilter.ConfigType,
			)
		}

		if err := ptypes.UnmarshalAny(tc.TypedConfig, &hcm); err != nil {
			return err
		}

		// Insert our authz filter before any others
		hcm.HttpFilters = append([]*envoy_http_v3.HttpFilter{
			authzFilter,
		}, hcm.HttpFilters...)

		// And persist the modified filter.
		newFilter, err := makeFilter(hcmFilter.Name, &hcm)
		if err != nil {
			return err
		}
		chain.Filters[hcmFilterIdx] = newFilter
	}

	return nil
}

// Ensure every filter chain uses our TLS certs. We might allow users to work
// around this later if there is a good use case but this is actually a feature
// for now as it allows them to specify custom listener params in config but
// still get our certs delivered dynamically and intentions enforced without
// coming up with some complicated templating/merging solution.
func (s *ResourceGenerator) injectConnectTLSOnFilterChains(cfgSnap *proxycfg.ConfigSnapshot, listener *envoy_listener_v3.Listener) error {
	for idx := range listener.FilterChains {
		tlsContext := &envoy_tls_v3.DownstreamTlsContext{
			CommonTlsContext:         makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.Leaf()),
			RequireClientCertificate: &wrappers.BoolValue{Value: true},
		}
		transportSocket, err := makeDownstreamTLSTransportSocket(tlsContext)
		if err != nil {
			return err
		}
		listener.FilterChains[idx].TransportSocket = transportSocket
	}
	return nil
}
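
// makeInboundListener constructs the proxy's public listener for inbound mesh
// traffic, either from the user-supplied escape-hatch JSON or from the
// configured bind address and port, and attaches the intention (RBAC) filters
// and TLS context to it.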
func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot, name string) (proto.Message, error) {
	var l *envoy_listener_v3.Listener
	var err error

	cfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
	if err != nil {
		// Don't hard fail on a config typo, just warn. The parse func returns
		// default config if there is an error so it's safe to continue.
		s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
	}

	// This controls if we do L4 or L7 intention checks.
	useHTTPFilter := structs.IsProtocolHTTPLike(cfg.Protocol)

	// Generate and return custom public listener from config if one was provided.
	if cfg.PublicListenerJSON != "" {
		l, err = makeListenerFromUserConfig(cfg.PublicListenerJSON)
		if err != nil {
			return nil, err
		}

		// For HTTP-like services attach an RBAC http filter and do a best-effort insert
		if useHTTPFilter {
			httpAuthzFilter, err := makeRBACHTTPFilter(
				cfgSnap.ConnectProxy.Intentions,
				cfgSnap.IntentionDefaultAllow,
			)
			if err != nil {
				return nil, err
			}

			// Try our best to inject the HTTP RBAC filter.
			if err := injectHTTPFilterOnFilterChains(l, httpAuthzFilter); err != nil {
				s.Logger.Warn(
					"could not inject the HTTP RBAC filter to enforce intentions on user-provided "+
						"'envoy_public_listener_json' config; falling back on the RBAC network filter instead",
					"proxy", cfgSnap.ProxyID,
					"error", err,
				)

				// If we get an error inject the RBAC network filter instead.
				useHTTPFilter = false
			}
		}

		err := s.finalizePublicListenerFromConfig(l, cfgSnap, useHTTPFilter)
		if err != nil {
			return nil, fmt.Errorf("failed to attach Consul filters and TLS context to custom public listener: %v", err)
		}
		return l, nil
	}

	// No JSON user config, use default listener address
	// Default to listening on all addresses, but override with bind address if one is set.
	addr := cfgSnap.Address
	if addr == "" {
		addr = "0.0.0.0"
	}
	if cfg.BindAddress != "" {
		addr = cfg.BindAddress
	}

	// Override with bind port if one is set, otherwise default to
	// proxy service's address
	port := cfgSnap.Port
	if cfg.BindPort != 0 {
		port = cfg.BindPort
	}

	l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND)

	filterOpts := listenerFilterOpts{
		protocol:         cfg.Protocol,
		filterName:       name,
		routeName:        name,
		cluster:          LocalAppClusterName,
		requestTimeoutMs: cfg.LocalRequestTimeoutMs,
	}
	if useHTTPFilter {
		filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter(
			cfgSnap.ConnectProxy.Intentions,
			cfgSnap.IntentionDefaultAllow,
		)
		if err != nil {
			return nil, err
		}
	}
	filter, err := makeListenerFilter(filterOpts)
	if err != nil {
		return nil, err
	}
	l.FilterChains = []*envoy_listener_v3.FilterChain{
		{
			Filters: []*envoy_listener_v3.Filter{
				filter,
			},
		},
	}

	err = s.finalizePublicListenerFromConfig(l, cfgSnap, useHTTPFilter)
	if err != nil {
		return nil, fmt.Errorf("failed to attach Consul filters and TLS context to custom public listener: %v", err)
	}

	return l, err
}

// finalizePublicListenerFromConfig is used for best-effort injection of Consul filter-chains onto listeners.
// This includes L4 authorization filters and TLS context.
func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v3.Listener, cfgSnap *proxycfg.ConfigSnapshot, useHTTPFilter bool) error {
	if !useHTTPFilter {
		// Best-effort injection of L4 intentions
		if err := s.injectConnectFilters(cfgSnap, l); err != nil {
			return nil
		}
	}

	// Always apply TLS certificates
	if err := s.injectConnectTLSOnFilterChains(cfgSnap, l); err != nil {
		return nil
	}
	return nil
}
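
// makeExposedCheckListener builds a listener for a single exposed path (such
// as an HTTP health check endpoint). When the path was parsed from a
// registered check, traffic sources are restricted to localhost and the
// agent's advertise address.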
func (s *ResourceGenerator) makeExposedCheckListener(cfgSnap *proxycfg.ConfigSnapshot, cluster string, path structs.ExposePath) (proto.Message, error) {
	cfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
	if err != nil {
		// Don't hard fail on a config typo, just warn. The parse func returns
		// default config if there is an error so it's safe to continue.
		s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
	}

	// No user config, use default listener
	addr := cfgSnap.Address

	// Override with bind address if one is set, otherwise default to 0.0.0.0
	if cfg.BindAddress != "" {
		addr = cfg.BindAddress
	} else if addr == "" {
		addr = "0.0.0.0"
	}

	// Strip any special characters from path to make a valid and hopefully unique name
	r := regexp.MustCompile(`[^a-zA-Z0-9]+`)
	strippedPath := r.ReplaceAllString(path.Path, "")
	listenerName := fmt.Sprintf("exposed_path_%s", strippedPath)

	l := makePortListener(listenerName, addr, path.ListenerPort, envoy_core_v3.TrafficDirection_INBOUND)

	filterName := fmt.Sprintf("exposed_path_filter_%s_%d", strippedPath, path.ListenerPort)

	opts := listenerFilterOpts{
		useRDS:          false,
		protocol:        path.Protocol,
		filterName:      filterName,
		routeName:       filterName,
		cluster:         cluster,
		statPrefix:      "",
		routePath:       path.Path,
		httpAuthzFilter: nil,
	}
	f, err := makeListenerFilter(opts)
	if err != nil {
		return nil, err
	}

	chain := &envoy_listener_v3.FilterChain{
		Filters: []*envoy_listener_v3.Filter{f},
	}

	// For registered checks restrict traffic sources to localhost and Consul's advertise addr
	if path.ParsedFromCheck {

		// For the advertise addr we use a CidrRange that only matches one address
		advertise := s.CfgFetcher.AdvertiseAddrLAN()

		// Get prefix length based on whether address is ipv4 (32 bits) or ipv6 (128 bits)
		advertiseLen := 32
		ip := net.ParseIP(advertise)
		if ip != nil && strings.Contains(advertise, ":") {
			advertiseLen = 128
		}

		ranges := make([]*envoy_core_v3.CidrRange, 0, 3)
		ranges = append(ranges,
			&envoy_core_v3.CidrRange{AddressPrefix: "127.0.0.1", PrefixLen: &wrappers.UInt32Value{Value: 8}},
			&envoy_core_v3.CidrRange{AddressPrefix: advertise, PrefixLen: &wrappers.UInt32Value{Value: uint32(advertiseLen)}},
		)

		if ok, err := kernelSupportsIPv6(); err != nil {
			return nil, err
		} else if ok {
			ranges = append(ranges,
				&envoy_core_v3.CidrRange{AddressPrefix: "::1", PrefixLen: &wrappers.UInt32Value{Value: 128}},
			)
		}

		chain.FilterChainMatch = &envoy_listener_v3.FilterChainMatch{
			SourcePrefixRanges: ranges,
		}
	}

	l.FilterChains = []*envoy_listener_v3.FilterChain{chain}

	return l, err
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
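// makeTerminatingGatewayListener builds the inbound listener for a terminating gateway: a TLS
// inspector plus one SNI-matched filter chain per linked service (and per service-resolver
// subset), with a catch-all sni_cluster/tcp_proxy chain appended so health checks still pass.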
func (s *ResourceGenerator) makeTerminatingGatewayListener(
|
2020-07-09 22:04:51 +00:00
|
|
|
cfgSnap *proxycfg.ConfigSnapshot,
|
|
|
|
name, addr string,
|
|
|
|
port int,
|
2021-02-26 22:23:15 +00:00
|
|
|
) (*envoy_listener_v3.Listener, error) {
|
2021-03-26 20:00:44 +00:00
|
|
|
l := makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND)
|
2020-04-13 16:33:01 +00:00
|
|
|
|
|
|
|
tlsInspector, err := makeTLSInspectorListenerFilter()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-02-26 22:23:15 +00:00
|
|
|
l.ListenerFilters = []*envoy_listener_v3.ListenerFilter{tlsInspector}
|
2020-04-13 16:33:01 +00:00
|
|
|
|
|
|
|
// Make a FilterChain for each linked service
|
|
|
|
// Match on the cluster name.
|
2020-08-27 17:20:58 +00:00
|
|
|
for _, svc := range cfgSnap.TerminatingGateway.ValidServices() {
|
2021-09-01 14:35:39 +00:00
|
|
|
clusterName := connect.ServiceSNI(svc.Name, "", svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
2020-08-27 17:20:58 +00:00
|
|
|
|
|
|
|
// Resolvers are optional.
|
2020-04-14 14:59:23 +00:00
|
|
|
resolver, hasResolver := cfgSnap.TerminatingGateway.ServiceResolvers[svc]
|
2020-04-13 16:33:01 +00:00
|
|
|
|
2020-08-27 17:20:58 +00:00
|
|
|
intentions := cfgSnap.TerminatingGateway.Intentions[svc]
|
|
|
|
svcConfig := cfgSnap.TerminatingGateway.ServiceConfigs[svc]
|
|
|
|
|
|
|
|
cfg, err := ParseProxyConfig(svcConfig.ProxyConfig)
|
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
2021-04-29 18:54:05 +00:00
|
|
|
s.Logger.Warn(
|
2020-08-27 17:20:58 +00:00
|
|
|
"failed to parse Connect.Proxy.Config for linked service",
|
|
|
|
"service", svc.String(),
|
|
|
|
"error", err,
|
|
|
|
)
|
2020-04-17 01:04:14 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 17:20:58 +00:00
|
|
|
clusterChain, err := s.makeFilterChainTerminatingGateway(
|
|
|
|
cfgSnap,
|
|
|
|
clusterName,
|
|
|
|
svc,
|
|
|
|
intentions,
|
|
|
|
cfg.Protocol,
|
|
|
|
)
|
2020-04-13 16:33:01 +00:00
|
|
|
if err != nil {
|
2020-04-14 14:59:23 +00:00
|
|
|
return nil, fmt.Errorf("failed to make filter chain for cluster %q: %v", clusterName, err)
|
2020-04-13 16:33:01 +00:00
|
|
|
}
|
2020-04-14 14:59:23 +00:00
|
|
|
l.FilterChains = append(l.FilterChains, clusterChain)
|
2020-04-13 16:33:01 +00:00
|
|
|
|
2020-04-14 14:59:23 +00:00
|
|
|
// if there is a service-resolver for this service then also setup subset filter chains for it
|
|
|
|
if hasResolver {
|
|
|
|
// generate 1 filter chain for each service subset
|
|
|
|
for subsetName := range resolver.Subsets {
|
2021-09-01 14:35:39 +00:00
|
|
|
subsetClusterName := connect.ServiceSNI(svc.Name, subsetName, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
2020-08-27 17:20:58 +00:00
|
|
|
|
|
|
|
subsetClusterChain, err := s.makeFilterChainTerminatingGateway(
|
|
|
|
cfgSnap,
|
|
|
|
subsetClusterName,
|
|
|
|
svc,
|
|
|
|
intentions,
|
|
|
|
cfg.Protocol,
|
|
|
|
)
|
2020-04-14 14:59:23 +00:00
|
|
|
if err != nil {
|
2020-08-27 17:20:58 +00:00
|
|
|
return nil, fmt.Errorf("failed to make filter chain for cluster %q: %v", subsetClusterName, err)
|
2020-04-14 14:59:23 +00:00
|
|
|
}
|
2020-08-27 17:20:58 +00:00
|
|
|
l.FilterChains = append(l.FilterChains, subsetClusterChain)
|
2020-04-14 14:59:23 +00:00
|
|
|
}
|
2020-04-13 16:33:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-08 16:19:57 +00:00
|
|
|
// Before we add the fallback, sort these chains by the matched name. All
|
|
|
|
// of these filter chains are independent, but envoy requires them to be in
|
|
|
|
// some order. If we put them in a random order then every xDS iteration
|
|
|
|
// envoy will force the listener to be replaced. Sorting these has no
|
|
|
|
// effect on how they operate, but it does mean that we won't churn
|
|
|
|
// listeners at idle.
|
|
|
|
sort.Slice(l.FilterChains, func(i, j int) bool {
|
|
|
|
return l.FilterChains[i].FilterChainMatch.ServerNames[0] < l.FilterChains[j].FilterChainMatch.ServerNames[0]
|
|
|
|
})
|
|
|
|
|
2020-04-14 21:13:25 +00:00
|
|
|
// This fallback catch-all filter ensures a listener will be present for health checks to pass
|
|
|
|
// Envoy will reset these connections since known endpoints are caught by filter chain matches above
|
2020-11-16 23:37:19 +00:00
|
|
|
tcpProxy, err := makeTCPProxyFilter(name, "", "terminating_gateway.")
|
2020-04-14 21:13:25 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-02-26 22:23:15 +00:00
|
|
|
fallback := &envoy_listener_v3.FilterChain{
|
|
|
|
Filters: []*envoy_listener_v3.Filter{
|
2020-04-14 21:13:25 +00:00
|
|
|
{Name: "envoy.filters.network.sni_cluster"},
|
|
|
|
tcpProxy,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
l.FilterChains = append(l.FilterChains, fallback)
|
|
|
|
|
2020-04-13 16:33:01 +00:00
|
|
|
return l, nil
|
|
|
|
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
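// makeFilterChainTerminatingGateway builds a single filter chain for a terminating gateway
// cluster: it matches on the cluster's SNI name, terminates TLS with the service's leaf
// certificate, enforces intentions via an RBAC filter (network filter for L4, HTTP filter for
// L7), and proxies traffic to the cluster.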
func (s *ResourceGenerator) makeFilterChainTerminatingGateway(
|
2020-07-09 22:04:51 +00:00
|
|
|
cfgSnap *proxycfg.ConfigSnapshot,
|
2021-06-15 17:19:45 +00:00
|
|
|
cluster string,
|
2020-07-09 22:04:51 +00:00
|
|
|
service structs.ServiceName,
|
2020-08-27 17:20:58 +00:00
|
|
|
intentions structs.Intentions,
|
|
|
|
protocol string,
|
2021-02-26 22:23:15 +00:00
|
|
|
) (*envoy_listener_v3.FilterChain, error) {
|
|
|
|
tlsContext := &envoy_tls_v3.DownstreamTlsContext{
|
2022-01-11 16:46:42 +00:00
|
|
|
CommonTlsContext: makeCommonTLSContextFromLeafWithoutParams(cfgSnap, cfgSnap.TerminatingGateway.ServiceLeaves[service]),
|
2021-02-22 21:00:15 +00:00
|
|
|
RequireClientCertificate: &wrappers.BoolValue{Value: true},
|
|
|
|
}
|
|
|
|
transportSocket, err := makeDownstreamTLSTransportSocket(tlsContext)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
filterChain := &envoy_listener_v3.FilterChain{
|
2020-08-27 17:20:58 +00:00
|
|
|
FilterChainMatch: makeSNIFilterChainMatch(cluster),
|
2021-02-26 22:23:15 +00:00
|
|
|
Filters: make([]*envoy_listener_v3.Filter, 0, 3),
|
2021-02-22 21:00:15 +00:00
|
|
|
TransportSocket: transportSocket,
|
2020-04-24 20:24:00 +00:00
|
|
|
}
|
2020-08-27 17:20:58 +00:00
|
|
|
|
|
|
|
// This controls if we do L4 or L7 intention checks.
|
|
|
|
useHTTPFilter := structs.IsProtocolHTTPLike(protocol)
|
|
|
|
|
|
|
|
// If this is L4, the first filter we set up does the intention checks.
|
|
|
|
if !useHTTPFilter {
|
|
|
|
authFilter, err := makeRBACNetworkFilter(
|
|
|
|
intentions,
|
|
|
|
cfgSnap.IntentionDefaultAllow,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
filterChain.Filters = append(filterChain.Filters, authFilter)
|
2020-04-14 14:59:23 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 17:20:58 +00:00
|
|
|
// Lastly we set up the actual proxying component. For L4 this is a straight
|
|
|
|
// tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an
|
|
|
|
// HTTP filter to do intention checks here instead.
|
|
|
|
opts := listenerFilterOpts{
|
|
|
|
protocol: protocol,
|
2021-09-07 20:29:32 +00:00
|
|
|
filterName: fmt.Sprintf("%s.%s.%s.%s", service.Name, service.NamespaceOrDefault(), service.PartitionOrDefault(), cfgSnap.Datacenter),
|
2020-09-04 18:45:05 +00:00
|
|
|
routeName: cluster, // Set cluster name for route config since each will have its own
|
2020-08-27 17:20:58 +00:00
|
|
|
cluster: cluster,
|
2021-06-15 14:12:02 +00:00
|
|
|
statPrefix: "upstream.",
|
2020-08-27 17:20:58 +00:00
|
|
|
routePath: "",
|
|
|
|
}
|
|
|
|
|
|
|
|
if useHTTPFilter {
|
|
|
|
var err error
|
|
|
|
opts.httpAuthzFilter, err = makeRBACHTTPFilter(
|
|
|
|
intentions,
|
|
|
|
cfgSnap.IntentionDefaultAllow,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-08-28 20:27:40 +00:00
|
|
|
|
|
|
|
opts.cluster = ""
|
|
|
|
opts.useRDS = true
|
2020-08-27 17:20:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
filter, err := makeListenerFilter(opts)
|
2020-04-14 14:59:23 +00:00
|
|
|
if err != nil {
|
2020-06-23 20:19:56 +00:00
|
|
|
return nil, err
|
2020-04-14 14:59:23 +00:00
|
|
|
}
|
2020-08-27 17:20:58 +00:00
|
|
|
filterChain.Filters = append(filterChain.Filters, filter)
|
2020-04-14 14:59:23 +00:00
|
|
|
|
2020-08-27 17:20:58 +00:00
|
|
|
return filterChain, nil
|
2020-04-14 14:59:23 +00:00
|
|
|
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
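// makeMeshGatewayListener builds the mesh gateway listener: a TLS inspector, one SNI-matched
// tcp_proxy chain per remote gateway key, optional chains for WAN-federated Consul servers, and
// a final sni_cluster catch-all chain.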
func (s *ResourceGenerator) makeMeshGatewayListener(name, addr string, port int, cfgSnap *proxycfg.ConfigSnapshot) (*envoy_listener_v3.Listener, error) {
|
2019-06-18 00:52:01 +00:00
|
|
|
tlsInspector, err := makeTLSInspectorListenerFilter()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
sniCluster, err := makeSNIClusterFilter()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// The cluster name here doesn't matter as the sni_cluster
|
|
|
|
// filter will fill it in for us.
|
2020-11-16 23:37:19 +00:00
|
|
|
tcpProxy, err := makeTCPProxyFilter(name, "", "mesh_gateway_local.")
|
2019-06-18 00:52:01 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
sniClusterChain := &envoy_listener_v3.FilterChain{
|
|
|
|
Filters: []*envoy_listener_v3.Filter{
|
2019-06-18 00:52:01 +00:00
|
|
|
sniCluster,
|
|
|
|
tcpProxy,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2021-03-26 20:00:44 +00:00
|
|
|
l := makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_UNSPECIFIED)
|
2021-02-26 22:23:15 +00:00
|
|
|
l.ListenerFilters = []*envoy_listener_v3.ListenerFilter{tlsInspector}
|
2019-06-18 00:52:01 +00:00
|
|
|
|
2021-10-27 18:36:44 +00:00
|
|
|
// We need 1 Filter Chain per remote cluster
|
2021-10-26 21:58:23 +00:00
|
|
|
keys := cfgSnap.MeshGateway.GatewayKeys()
|
2021-10-23 20:17:29 +00:00
|
|
|
for _, key := range keys {
|
2021-10-24 15:51:55 +00:00
|
|
|
if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrEmpty()) {
|
2020-03-09 20:59:02 +00:00
|
|
|
continue // skip local
|
|
|
|
}
|
2021-10-24 15:51:55 +00:00
|
|
|
|
2021-10-24 15:16:28 +00:00
|
|
|
clusterName := connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain)
|
2021-10-23 20:17:29 +00:00
|
|
|
filterName := fmt.Sprintf("%s.%s", name, key.String())
|
2020-11-16 23:37:19 +00:00
|
|
|
dcTCPProxy, err := makeTCPProxyFilter(filterName, clusterName, "mesh_gateway_remote.")
|
2019-06-18 00:52:01 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
l.FilterChains = append(l.FilterChains, &envoy_listener_v3.FilterChain{
|
|
|
|
FilterChainMatch: &envoy_listener_v3.FilterChainMatch{
|
2019-06-18 00:52:01 +00:00
|
|
|
ServerNames: []string{fmt.Sprintf("*.%s", clusterName)},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
Filters: []*envoy_listener_v3.Filter{
|
2019-06-18 00:52:01 +00:00
|
|
|
dcTCPProxy,
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-10-26 22:10:30 +00:00
|
|
|
if cfgSnap.ProxyID.InDefaultPartition() &&
|
2021-10-26 21:58:23 +00:00
|
|
|
cfgSnap.ServiceMeta[structs.MetaWANFederationKey] == "1" &&
|
|
|
|
cfgSnap.ServerSNIFn != nil {
|
|
|
|
|
2021-10-23 20:17:29 +00:00
|
|
|
for _, key := range keys {
|
|
|
|
if key.Datacenter == cfgSnap.Datacenter {
|
2020-03-09 20:59:02 +00:00
|
|
|
continue // skip local
|
|
|
|
}
|
2021-10-23 20:17:29 +00:00
|
|
|
clusterName := cfgSnap.ServerSNIFn(key.Datacenter, "")
|
|
|
|
filterName := fmt.Sprintf("%s.%s", name, key.String())
|
2020-11-16 23:37:19 +00:00
|
|
|
dcTCPProxy, err := makeTCPProxyFilter(filterName, clusterName, "mesh_gateway_remote.")
|
2020-03-09 20:59:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
l.FilterChains = append(l.FilterChains, &envoy_listener_v3.FilterChain{
|
|
|
|
FilterChainMatch: &envoy_listener_v3.FilterChainMatch{
|
2020-03-09 20:59:02 +00:00
|
|
|
ServerNames: []string{fmt.Sprintf("*.%s", clusterName)},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
Filters: []*envoy_listener_v3.Filter{
|
2020-03-09 20:59:02 +00:00
|
|
|
dcTCPProxy,
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wildcard all flavors to each server.
|
|
|
|
for _, srv := range cfgSnap.MeshGateway.ConsulServers {
|
|
|
|
clusterName := cfgSnap.ServerSNIFn(cfgSnap.Datacenter, srv.Node.Node)
|
|
|
|
|
2020-11-16 23:37:19 +00:00
|
|
|
filterName := fmt.Sprintf("%s.%s", name, cfgSnap.Datacenter)
|
|
|
|
dcTCPProxy, err := makeTCPProxyFilter(filterName, clusterName, "mesh_gateway_local_server.")
|
2020-03-09 20:59:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
l.FilterChains = append(l.FilterChains, &envoy_listener_v3.FilterChain{
|
|
|
|
FilterChainMatch: &envoy_listener_v3.FilterChainMatch{
|
2020-03-09 20:59:02 +00:00
|
|
|
ServerNames: []string{clusterName},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
Filters: []*envoy_listener_v3.Filter{
|
2020-03-09 20:59:02 +00:00
|
|
|
dcTCPProxy,
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-18 00:52:01 +00:00
|
|
|
// This needs to get tacked on at the end as it has no
|
|
|
|
// matching and will act as a catch all
|
|
|
|
l.FilterChains = append(l.FilterChains, sniClusterChain)
|
|
|
|
|
|
|
|
return l, nil
|
|
|
|
}
|
|
|
|
|
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
|
|
|
type filterChainOpts struct {
|
|
|
|
routeName string
|
|
|
|
clusterName string
|
|
|
|
filterName string
|
|
|
|
protocol string
|
|
|
|
useRDS bool
|
|
|
|
tlsContext *envoy_tls_v3.DownstreamTlsContext
|
|
|
|
}
|
2021-03-17 19:40:49 +00:00
|
|
|
|
2021-11-09 21:43:51 +00:00
|
|
|
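// makeUpstreamFilterChain builds a filter chain for an upstream listener from the pre-computed
// route, cluster, and filter names, optionally terminating TLS with the provided downstream context.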
func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) {
|
|
|
|
filter, err := makeListenerFilter(listenerFilterOpts{
|
|
|
|
useRDS: opts.useRDS,
|
|
|
|
protocol: opts.protocol,
|
|
|
|
filterName: opts.filterName,
|
|
|
|
routeName: opts.routeName,
|
|
|
|
cluster: opts.clusterName,
|
|
|
|
statPrefix: "upstream.",
|
|
|
|
})
|
2021-03-17 19:40:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-11-09 21:43:51 +00:00
|
|
|
|
|
|
|
transportSocket, err := makeDownstreamTLSTransportSocket(opts.tlsContext)
|
2021-03-17 19:40:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &envoy_listener_v3.FilterChain{
|
|
|
|
Filters: []*envoy_listener_v3.Filter{
|
|
|
|
filter,
|
|
|
|
},
|
|
|
|
TransportSocket: transportSocket,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-11-09 21:43:51 +00:00
|
|
|
// simpleChainTarget returns the discovery target for a chain with a single node.
|
|
|
|
// A chain can have a single target if it is for a TCP service or an HTTP service without
|
|
|
|
// multiple splits/routes/failovers.
|
|
|
|
func simpleChainTarget(chain *structs.CompiledDiscoveryChain) (*structs.DiscoveryTarget, error) {
|
|
|
|
startNode := chain.Nodes[chain.StartNode]
|
|
|
|
if startNode == nil {
|
|
|
|
return nil, fmt.Errorf("missing first node in compiled discovery chain for: %s", chain.ServiceName)
|
2019-07-08 11:48:48 +00:00
|
|
|
}
|
2021-11-09 21:43:51 +00:00
|
|
|
if startNode.Type != structs.DiscoveryGraphNodeTypeResolver {
|
|
|
|
return nil, fmt.Errorf("expected discovery chain with single node, found unexpected start node: %s", startNode.Type)
|
2019-07-08 11:48:48 +00:00
|
|
|
}
|
2021-11-09 21:43:51 +00:00
|
|
|
targetID := startNode.Resolver.Target
|
|
|
|
return chain.Targets[targetID], nil
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
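// getAndModifyUpstreamConfigForListener parses the upstream's opaque config. When a non-default
// discovery chain exists it drops the envoy_listener_json escape hatch and fills in the protocol
// from the chain, defaulting to tcp.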
func (s *ResourceGenerator) getAndModifyUpstreamConfigForListener(
|
|
|
|
uid proxycfg.UpstreamID,
|
|
|
|
u *structs.Upstream,
|
|
|
|
chain *structs.CompiledDiscoveryChain,
|
|
|
|
) structs.UpstreamConfig {
|
2020-05-21 14:08:12 +00:00
|
|
|
var (
|
2021-03-09 05:10:27 +00:00
|
|
|
cfg structs.UpstreamConfig
|
2020-05-21 14:08:12 +00:00
|
|
|
err error
|
|
|
|
)
|
|
|
|
|
2021-03-17 19:40:49 +00:00
|
|
|
configMap := make(map[string]interface{})
|
|
|
|
if u != nil {
|
|
|
|
configMap = u.Config
|
|
|
|
}
|
2020-05-21 14:08:12 +00:00
|
|
|
if chain == nil || chain.IsDefault() {
|
2021-03-17 19:40:49 +00:00
|
|
|
cfg, err = structs.ParseUpstreamConfig(configMap)
|
2020-05-21 14:08:12 +00:00
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
2022-01-20 16:12:04 +00:00
|
|
|
s.Logger.Warn("failed to parse", "upstream", uid, "error", err)
|
2020-05-21 14:08:12 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Use NoDefaults here so that we can set the protocol to the chain
|
|
|
|
// protocol if necessary
|
2021-03-17 19:40:49 +00:00
|
|
|
cfg, err = structs.ParseUpstreamConfigNoDefaults(configMap)
|
2020-05-21 14:08:12 +00:00
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
2022-01-20 16:12:04 +00:00
|
|
|
s.Logger.Warn("failed to parse", "upstream", uid, "error", err)
|
2020-05-21 14:08:12 +00:00
|
|
|
}
|
|
|
|
|
2021-03-15 20:12:57 +00:00
|
|
|
if cfg.EnvoyListenerJSON != "" {
|
2021-04-29 18:54:05 +00:00
|
|
|
s.Logger.Warn("ignoring escape hatch setting because already configured for",
|
2022-01-20 16:12:04 +00:00
|
|
|
"discovery chain", chain.ServiceName, "upstream", uid, "config", "envoy_listener_json")
|
2020-05-21 14:08:12 +00:00
|
|
|
|
|
|
|
// Remove from config struct so we don't use it later on
|
2021-03-15 20:12:57 +00:00
|
|
|
cfg.EnvoyListenerJSON = ""
|
2020-05-21 14:08:12 +00:00
|
|
|
}
|
|
|
|
|
2021-03-17 19:40:49 +00:00
|
|
|
protocol := cfg.Protocol
|
|
|
|
if protocol == "" {
|
|
|
|
protocol = chain.Protocol
|
2020-05-21 14:08:12 +00:00
|
|
|
}
|
2021-03-17 19:40:49 +00:00
|
|
|
if protocol == "" {
|
|
|
|
protocol = "tcp"
|
2020-05-21 14:08:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Set the protocol back on the config so it is reflected in the return value
|
2021-03-17 19:40:49 +00:00
|
|
|
cfg.Protocol = protocol
|
2020-05-21 14:08:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return cfg
|
|
|
|
}
|
|
|
|
|
2020-08-27 17:20:58 +00:00
|
|
|
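// listenerFilterOpts holds the inputs used to build the terminal network filter of a filter
// chain, either a tcp_proxy or an HTTP connection manager.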
type listenerFilterOpts struct {
|
2021-01-25 19:50:00 +00:00
|
|
|
useRDS bool
|
|
|
|
protocol string
|
|
|
|
filterName string
|
|
|
|
routeName string
|
|
|
|
cluster string
|
|
|
|
statPrefix string
|
|
|
|
routePath string
|
|
|
|
requestTimeoutMs *int
|
2021-03-17 19:40:49 +00:00
|
|
|
ingressGateway bool
|
2021-02-26 22:23:15 +00:00
|
|
|
httpAuthzFilter *envoy_http_v3.HttpFilter
|
2020-08-27 17:20:58 +00:00
|
|
|
}
|
2019-09-26 02:55:52 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
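// makeListenerFilter returns an HTTP connection manager filter for HTTP-like protocols and a
// tcp_proxy filter for everything else.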
func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
|
2020-08-27 17:20:58 +00:00
|
|
|
switch opts.protocol {
|
2020-09-04 18:45:05 +00:00
|
|
|
case "grpc", "http2", "http":
|
|
|
|
return makeHTTPFilter(opts)
|
2019-04-29 16:27:57 +00:00
|
|
|
case "tcp":
|
|
|
|
fallthrough
|
|
|
|
default:
|
2020-08-27 17:20:58 +00:00
|
|
|
if opts.useRDS {
|
2020-06-23 20:19:56 +00:00
|
|
|
return nil, fmt.Errorf("RDS is not compatible with the tcp proxy filter")
|
2020-08-27 17:20:58 +00:00
|
|
|
} else if opts.cluster == "" {
|
2020-06-23 20:19:56 +00:00
|
|
|
return nil, fmt.Errorf("cluster name is required for a tcp proxy filter")
|
2019-10-17 21:44:59 +00:00
|
|
|
}
|
2020-08-27 17:20:58 +00:00
|
|
|
return makeTCPProxyFilter(opts.filterName, opts.cluster, opts.statPrefix)
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeTLSInspectorListenerFilter() (*envoy_listener_v3.ListenerFilter, error) {
|
|
|
|
return &envoy_listener_v3.ListenerFilter{Name: "envoy.filters.listener.tls_inspector"}, nil
|
2019-06-18 00:52:01 +00:00
|
|
|
}
|
|
|
|
|
2021-08-24 13:10:16 +00:00
|
|
|
func makeSNIFilterChainMatch(sniMatches ...string) *envoy_listener_v3.FilterChainMatch {
|
2021-02-26 22:23:15 +00:00
|
|
|
return &envoy_listener_v3.FilterChainMatch{
|
2021-08-24 13:10:16 +00:00
|
|
|
ServerNames: sniMatches,
|
2020-04-13 16:33:01 +00:00
|
|
|
}
|
2019-06-18 00:52:01 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeSNIClusterFilter() (*envoy_listener_v3.Filter, error) {
|
2019-06-18 00:52:01 +00:00
|
|
|
// This filter has no config, which is why we don't call makeFilter here
|
2021-02-26 22:23:15 +00:00
|
|
|
return &envoy_listener_v3.Filter{Name: "envoy.filters.network.sni_cluster"}, nil
|
2019-06-18 00:52:01 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener_v3.Filter, error) {
|
|
|
|
cfg := &envoy_tcp_proxy_v3.TcpProxy{
|
2020-11-16 23:37:19 +00:00
|
|
|
StatPrefix: makeStatPrefix(statPrefix, filterName),
|
2021-02-26 22:23:15 +00:00
|
|
|
ClusterSpecifier: &envoy_tcp_proxy_v3.TcpProxy_Cluster{Cluster: cluster},
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
2021-02-22 21:00:15 +00:00
|
|
|
return makeFilter("envoy.filters.network.tcp_proxy", cfg)
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
|
|
|
|
2020-11-16 23:37:19 +00:00
|
|
|
func makeStatPrefix(prefix, filterName string) string {
|
2019-04-29 16:27:57 +00:00
|
|
|
// Replace colons here because Envoy does that in the metrics for the actual
|
|
|
|
// clusters but doesn't in the stat prefix here while dashboards assume they
|
|
|
|
// will match.
|
2020-11-16 23:37:19 +00:00
|
|
|
return fmt.Sprintf("%s%s", prefix, strings.Replace(filterName, ":", "_", -1))
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
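// makeHTTPFilter builds the http_connection_manager network filter, wiring its route config via
// RDS or an inline route, and prepending the intention RBAC and gRPC-related HTTP filters when
// requested.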
func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
|
|
|
|
cfg := &envoy_http_v3.HttpConnectionManager{
|
2020-11-16 23:37:19 +00:00
|
|
|
StatPrefix: makeStatPrefix(opts.statPrefix, opts.filterName),
|
2021-02-26 22:23:15 +00:00
|
|
|
CodecType: envoy_http_v3.HttpConnectionManager_AUTO,
|
|
|
|
HttpFilters: []*envoy_http_v3.HttpFilter{
|
2020-06-16 17:19:31 +00:00
|
|
|
{
|
2021-02-22 21:00:15 +00:00
|
|
|
Name: "envoy.filters.http.router",
|
2019-07-02 03:10:51 +00:00
|
|
|
},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
Tracing: &envoy_http_v3.HttpConnectionManager_Tracing{
|
2019-07-02 03:10:51 +00:00
|
|
|
// Don't trace any requests by default unless the client application
|
|
|
|
// explicitly propagates trace headers that indicate this should be
|
|
|
|
// sampled.
|
2021-02-26 22:23:15 +00:00
|
|
|
RandomSampling: &envoy_type_v3.Percent{Value: 0.0},
|
2019-07-02 03:10:51 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2020-09-04 18:45:05 +00:00
|
|
|
if opts.useRDS {
|
|
|
|
if opts.cluster != "" {
|
2020-06-23 20:19:56 +00:00
|
|
|
return nil, fmt.Errorf("cannot specify cluster name when using RDS")
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
2021-02-26 22:23:15 +00:00
|
|
|
cfg.RouteSpecifier = &envoy_http_v3.HttpConnectionManager_Rds{
|
|
|
|
Rds: &envoy_http_v3.Rds{
|
2020-09-04 18:45:05 +00:00
|
|
|
RouteConfigName: opts.routeName,
|
2021-02-26 22:23:15 +00:00
|
|
|
ConfigSource: &envoy_core_v3.ConfigSource{
|
|
|
|
ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
|
|
|
|
ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
|
|
|
|
Ads: &envoy_core_v3.AggregatedConfigSource{},
|
2019-07-02 03:10:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
} else {
|
2020-09-04 18:45:05 +00:00
|
|
|
if opts.cluster == "" {
|
2020-06-23 20:19:56 +00:00
|
|
|
return nil, fmt.Errorf("must specify cluster name when not using RDS")
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
2021-01-25 19:50:00 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
route := &envoy_route_v3.Route{
|
|
|
|
Match: &envoy_route_v3.RouteMatch{
|
|
|
|
PathSpecifier: &envoy_route_v3.RouteMatch_Prefix{
|
2019-09-26 02:55:52 +00:00
|
|
|
Prefix: "/",
|
|
|
|
},
|
|
|
|
// TODO(banks) Envoy supports matching only valid GRPC
|
|
|
|
// requests which might be nice to add here for gRPC services
|
|
|
|
// but it's not supported in our current envoy SDK version
|
|
|
|
// although docs say it was supported by 1.8.0. Going to defer
|
|
|
|
// that until we've updated the deps.
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
Action: &envoy_route_v3.Route_Route{
|
|
|
|
Route: &envoy_route_v3.RouteAction{
|
|
|
|
ClusterSpecifier: &envoy_route_v3.RouteAction_Cluster{
|
2020-09-04 18:45:05 +00:00
|
|
|
Cluster: opts.cluster,
|
2019-09-26 02:55:52 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2021-01-25 19:50:00 +00:00
|
|
|
|
|
|
|
if opts.requestTimeoutMs != nil {
|
|
|
|
r := route.GetRoute()
|
2021-02-22 21:00:15 +00:00
|
|
|
r.Timeout = ptypes.DurationProto(time.Duration(*opts.requestTimeoutMs) * time.Millisecond)
|
2021-01-25 19:50:00 +00:00
|
|
|
}
|
|
|
|
|
2019-09-26 02:55:52 +00:00
|
|
|
// If a path is provided, do not match on a catch-all prefix
|
2020-09-04 18:45:05 +00:00
|
|
|
if opts.routePath != "" {
|
2021-02-26 22:23:15 +00:00
|
|
|
route.Match.PathSpecifier = &envoy_route_v3.RouteMatch_Path{Path: opts.routePath}
|
2019-09-26 02:55:52 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
cfg.RouteSpecifier = &envoy_http_v3.HttpConnectionManager_RouteConfig{
|
|
|
|
RouteConfig: &envoy_route_v3.RouteConfiguration{
|
2020-09-04 18:45:05 +00:00
|
|
|
Name: opts.routeName,
|
2021-02-26 22:23:15 +00:00
|
|
|
VirtualHosts: []*envoy_route_v3.VirtualHost{
|
2019-09-26 02:55:52 +00:00
|
|
|
{
|
2020-09-04 18:45:05 +00:00
|
|
|
Name: opts.filterName,
|
2019-04-29 16:27:57 +00:00
|
|
|
Domains: []string{"*"},
|
2021-02-26 22:23:15 +00:00
|
|
|
Routes: []*envoy_route_v3.Route{
|
2019-09-26 02:55:52 +00:00
|
|
|
route,
|
2019-04-29 16:27:57 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
|
2020-09-04 18:45:05 +00:00
|
|
|
if opts.protocol == "http2" || opts.protocol == "grpc" {
|
2021-02-26 22:23:15 +00:00
|
|
|
cfg.Http2ProtocolOptions = &envoy_core_v3.Http2ProtocolOptions{}
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 17:20:58 +00:00
|
|
|
// Like injectConnectFilters for L4, here we ensure that the first filter
|
|
|
|
// (other than the "envoy.grpc_http1_bridge" filter) in the http filter
|
|
|
|
// chain of a public listener is the authz filter to prevent unauthorized
|
|
|
|
// access and that every filter chain uses our TLS certs.
|
2020-09-04 18:45:05 +00:00
|
|
|
if opts.httpAuthzFilter != nil {
|
2021-02-26 22:23:15 +00:00
|
|
|
cfg.HttpFilters = append([]*envoy_http_v3.HttpFilter{opts.httpAuthzFilter}, cfg.HttpFilters...)
|
2020-08-27 17:20:58 +00:00
|
|
|
}
|
|
|
|
|
2020-09-04 18:45:05 +00:00
|
|
|
if opts.protocol == "grpc" {
|
2020-08-27 17:20:58 +00:00
|
|
|
// Add grpc bridge before router and authz
|
2021-02-26 22:23:15 +00:00
|
|
|
cfg.HttpFilters = append([]*envoy_http_v3.HttpFilter{{
|
|
|
|
Name: "envoy.filters.http.grpc_http1_bridge",
|
2019-04-29 16:27:57 +00:00
|
|
|
}}, cfg.HttpFilters...)
|
2021-04-29 20:22:03 +00:00
|
|
|
|
|
|
|
// In envoy 1.14.x the default value "stats_for_all_methods=true" was
|
|
|
|
// deprecated, and was changed to "false" in 1.18.x. Avoid using the
|
|
|
|
// default. TODO: we may want to expose this to users somehow easily.
|
|
|
|
grpcStatsFilter, err := makeEnvoyHTTPFilter(
|
|
|
|
"envoy.filters.http.grpc_stats",
|
|
|
|
&envoy_grpc_stats_v3.FilterConfig{
|
|
|
|
PerMethodStatSpecifier: &envoy_grpc_stats_v3.FilterConfig_StatsForAllMethods{
|
|
|
|
StatsForAllMethods: makeBoolValue(true),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
cfg.HttpFilters = append([]*envoy_http_v3.HttpFilter{
|
|
|
|
grpcStatsFilter,
|
|
|
|
}, cfg.HttpFilters...)
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
|
2021-02-22 21:00:15 +00:00
|
|
|
return makeFilter("envoy.filters.network.http_connection_manager", cfg)
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
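// makeFilter marshals the given config into a typed Any and wraps it in a named network filter.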
func makeFilter(name string, cfg proto.Message) (*envoy_listener_v3.Filter, error) {
|
2021-02-22 21:00:15 +00:00
|
|
|
any, err := ptypes.MarshalAny(cfg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2020-08-27 17:20:58 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
return &envoy_listener_v3.Filter{
|
|
|
|
Name: name,
|
|
|
|
ConfigType: &envoy_listener_v3.Filter_TypedConfig{TypedConfig: any},
|
|
|
|
}, nil
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeEnvoyHTTPFilter(name string, cfg proto.Message) (*envoy_http_v3.HttpFilter, error) {
|
2021-02-22 21:00:15 +00:00
|
|
|
any, err := ptypes.MarshalAny(cfg)
|
2018-10-03 18:18:55 +00:00
|
|
|
if err != nil {
|
2020-06-23 20:19:56 +00:00
|
|
|
return nil, err
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
return &envoy_http_v3.HttpFilter{
|
2019-06-07 12:10:43 +00:00
|
|
|
Name: name,
|
2021-02-26 22:23:15 +00:00
|
|
|
ConfigType: &envoy_http_v3.HttpFilter_TypedConfig{TypedConfig: any},
|
2018-10-03 18:18:55 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2022-01-11 16:46:42 +00:00
|
|
|
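// makeCommonTLSContextFromLeafWithoutParams is a convenience wrapper that uses default TLS parameters.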
func makeCommonTLSContextFromLeafWithoutParams(cfgSnap *proxycfg.ConfigSnapshot, leaf *structs.IssuedCert) *envoy_tls_v3.CommonTlsContext {
|
|
|
|
return makeCommonTLSContextFromLeaf(cfgSnap, leaf, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
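// makeCommonTLSContextFromLeaf builds a CommonTlsContext that presents the given leaf certificate
// and key and trusts the concatenated root CA certificates from the config snapshot.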
func makeCommonTLSContextFromLeaf(cfgSnap *proxycfg.ConfigSnapshot, leaf *structs.IssuedCert, tlsParams *envoy_tls_v3.TlsParameters) *envoy_tls_v3.CommonTlsContext {
|
2018-10-03 18:18:55 +00:00
|
|
|
// Concatenate all the root PEMs into one.
|
2019-03-22 19:37:14 +00:00
|
|
|
if cfgSnap.Roots == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2020-08-28 20:27:40 +00:00
|
|
|
|
|
|
|
rootPEMS := ""
|
2018-10-03 18:18:55 +00:00
|
|
|
for _, root := range cfgSnap.Roots.Roots {
|
2021-07-01 00:48:29 +00:00
|
|
|
rootPEMS += ca.EnsureTrailingNewline(root.RootCert)
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
|
|
|
|
2022-01-11 16:46:42 +00:00
|
|
|
if tlsParams == nil {
|
|
|
|
tlsParams = &envoy_tls_v3.TlsParameters{}
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
return &envoy_tls_v3.CommonTlsContext{
|
2022-01-11 16:46:42 +00:00
|
|
|
TlsParams: tlsParams,
|
2021-02-26 22:23:15 +00:00
|
|
|
TlsCertificates: []*envoy_tls_v3.TlsCertificate{
|
2020-06-16 17:19:31 +00:00
|
|
|
{
|
2021-02-26 22:23:15 +00:00
|
|
|
CertificateChain: &envoy_core_v3.DataSource{
|
|
|
|
Specifier: &envoy_core_v3.DataSource_InlineString{
|
2021-07-01 00:48:29 +00:00
|
|
|
InlineString: ca.EnsureTrailingNewline(leaf.CertPEM),
|
2018-10-03 18:18:55 +00:00
|
|
|
},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
PrivateKey: &envoy_core_v3.DataSource{
|
|
|
|
Specifier: &envoy_core_v3.DataSource_InlineString{
|
2021-07-01 00:48:29 +00:00
|
|
|
InlineString: ca.EnsureTrailingNewline(leaf.PrivateKeyPEM),
|
2018-10-03 18:18:55 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
ValidationContextType: &envoy_tls_v3.CommonTlsContext_ValidationContext{
|
|
|
|
ValidationContext: &envoy_tls_v3.CertificateValidationContext{
|
2018-10-03 18:18:55 +00:00
|
|
|
// TODO(banks): later for L7 support we may need to configure ALPN here.
|
2021-02-26 22:23:15 +00:00
|
|
|
TrustedCa: &envoy_core_v3.DataSource{
|
|
|
|
Specifier: &envoy_core_v3.DataSource_InlineString{
|
2018-10-03 18:18:55 +00:00
|
|
|
InlineString: rootPEMS,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2020-04-27 22:25:37 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeDownstreamTLSTransportSocket(tlsContext *envoy_tls_v3.DownstreamTlsContext) (*envoy_core_v3.TransportSocket, error) {
|
2021-02-22 21:00:15 +00:00
|
|
|
if tlsContext == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return makeTransportSocket("tls", tlsContext)
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeUpstreamTLSTransportSocket(tlsContext *envoy_tls_v3.UpstreamTlsContext) (*envoy_core_v3.TransportSocket, error) {
|
2021-02-22 21:00:15 +00:00
|
|
|
if tlsContext == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return makeTransportSocket("tls", tlsContext)
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeTransportSocket(name string, config proto.Message) (*envoy_core_v3.TransportSocket, error) {
|
2021-02-22 21:00:15 +00:00
|
|
|
any, err := ptypes.MarshalAny(config)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-02-26 22:23:15 +00:00
|
|
|
return &envoy_core_v3.TransportSocket{
|
2021-02-22 21:00:15 +00:00
|
|
|
Name: name,
|
2021-02-26 22:23:15 +00:00
|
|
|
ConfigType: &envoy_core_v3.TransportSocket_TypedConfig{
|
2021-02-22 21:00:15 +00:00
|
|
|
TypedConfig: any,
|
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
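// makeCommonTLSContextFromFiles builds a CommonTlsContext from file paths: a CA file enables peer
// certificate verification, and a cert/key pair enables presenting a certificate for mTLS.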
func makeCommonTLSContextFromFiles(caFile, certFile, keyFile string) *envoy_tls_v3.CommonTlsContext {
|
|
|
|
ctx := envoy_tls_v3.CommonTlsContext{
|
|
|
|
TlsParams: &envoy_tls_v3.TlsParameters{},
|
2020-04-27 22:25:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify certificate of peer if caFile is specified
|
|
|
|
if caFile != "" {
|
2021-02-26 22:23:15 +00:00
|
|
|
ctx.ValidationContextType = &envoy_tls_v3.CommonTlsContext_ValidationContext{
|
|
|
|
ValidationContext: &envoy_tls_v3.CertificateValidationContext{
|
|
|
|
TrustedCa: &envoy_core_v3.DataSource{
|
|
|
|
Specifier: &envoy_core_v3.DataSource_Filename{
|
2020-04-27 22:25:37 +00:00
|
|
|
Filename: caFile,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Present certificate for mTLS if cert and key files are specified
|
|
|
|
if certFile != "" && keyFile != "" {
|
2021-02-26 22:23:15 +00:00
|
|
|
ctx.TlsCertificates = []*envoy_tls_v3.TlsCertificate{
|
2020-04-27 22:25:37 +00:00
|
|
|
{
|
2021-02-26 22:23:15 +00:00
|
|
|
CertificateChain: &envoy_core_v3.DataSource{
|
|
|
|
Specifier: &envoy_core_v3.DataSource_Filename{
|
2020-04-27 22:25:37 +00:00
|
|
|
Filename: certFile,
|
|
|
|
},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
PrivateKey: &envoy_core_v3.DataSource{
|
|
|
|
Specifier: &envoy_core_v3.DataSource_Filename{
|
2020-04-27 22:25:37 +00:00
|
|
|
Filename: keyFile,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &ctx
|
|
|
|
}
|