package xds

import (
    "errors"
    "fmt"
    "strconv"

    envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
    envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
    "github.com/golang/protobuf/proto"
    "github.com/hashicorp/go-bexpr"

    "github.com/hashicorp/consul/agent/connect"
    "github.com/hashicorp/consul/agent/proxycfg"
    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/api"
)

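// UnnamedSubset is the subset name used for the default/unnamed subset when
// building per-subset load assignments for a service.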
const (
    UnnamedSubset = ""
)

// endpointsFromSnapshot returns the xDS API representation of the "endpoints"
// (upstream instances) in the snapshot.
func (s *ResourceGenerator) endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    if cfgSnap == nil {
        return nil, errors.New("nil config given")
    }

    switch cfgSnap.Kind {
    case structs.ServiceKindConnectProxy:
        return s.endpointsFromSnapshotConnectProxy(cfgSnap)
    case structs.ServiceKindTerminatingGateway:
        return s.endpointsFromSnapshotTerminatingGateway(cfgSnap)
    case structs.ServiceKindMeshGateway:
        return s.endpointsFromSnapshotMeshGateway(cfgSnap)
    case structs.ServiceKindIngressGateway:
        return s.endpointsFromSnapshotIngressGateway(cfgSnap)
    default:
        return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
    }
}

// endpointsFromSnapshotConnectProxy returns the xDS API representation of the "endpoints"
// (upstream instances) in the snapshot.
func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    // TODO: this estimate is wrong
    resources := make([]proto.Message, 0,
        len(cfgSnap.ConnectProxy.PreparedQueryEndpoints)+
            cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+
            len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints))

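    // getUpstream returns the upstream config for uid, if one is known, along with
    // a flag indicating whether the upstream should be skipped because it is neither
    // an explicit upstream (one with a local port or socket) nor an implicit
    // (transparent-proxy) upstream.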
    getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
        upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

        explicit := upstream.HasLocalPortOrSocket()
        implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
        return upstream, !implicit && !explicit
    }

    // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
    // so that the sets of endpoints generated match the sets of clusters.
    for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
        upstream, skip := getUpstream(uid)
        if skip {
            // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
            continue
        }

        var upstreamConfigMap map[string]interface{}
        if upstream != nil {
            upstreamConfigMap = upstream.Config
        }

        es, err := s.endpointsFromDiscoveryChain(
            uid,
            chain,
            cfgSnap,
            cfgSnap.Locality,
            upstreamConfigMap,
            cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
            cfgSnap.ConnectProxy.WatchedGatewayEndpoints[uid],
            false,
        )
        if err != nil {
            return nil, err
        }
        resources = append(resources, es...)
    }

    // NOTE: Any time we skip an upstream below we MUST also skip that same
    // upstream in clusters.go so that the sets of endpoints generated match
    // the sets of clusters.
    for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
        _, skip := getUpstream(uid)
        if skip {
            // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
            continue
        }

        tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
        if !ok {
            // this should never happen since we loop through upstreams with
            // set trust bundles
            return nil, fmt.Errorf("trust bundle not ready for peer %s", uid.Peer)
        }

        clusterName := generatePeeredClusterName(uid, tbs)

        loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
        if err != nil {
            return nil, err
        }

        if loadAssignment != nil {
            resources = append(resources, loadAssignment)
        }
    }

    // Looping over explicit upstreams is only needed for prepared queries because they do not have discovery chains
    for _, u := range cfgSnap.Proxy.Upstreams {
        if u.DestinationType != structs.UpstreamDestTypePreparedQuery {
            continue
        }
        uid := proxycfg.NewUpstreamID(&u)

        dc := u.Datacenter
        if dc == "" {
            dc = cfgSnap.Datacenter
        }
        clusterName := connect.UpstreamSNI(&u, "", dc, cfgSnap.Roots.TrustDomain)

        endpoints, ok := cfgSnap.ConnectProxy.PreparedQueryEndpoints[uid]
        if ok {
            la := makeLoadAssignment(
                clusterName,
                []loadAssignmentEndpointGroup{
                    {Endpoints: endpoints},
                },
                cfgSnap.Locality,
            )
            resources = append(resources, la)
        }
    }

    // Loop over potential destinations in the mesh, then grab the gateway nodes associated with each
    cfgSnap.ConnectProxy.DestinationsUpstream.ForEachKey(func(uid proxycfg.UpstreamID) bool {
        svcConfig, ok := cfgSnap.ConnectProxy.DestinationsUpstream.Get(uid)
        if !ok || svcConfig.Destination == nil {
            return true
        }

        for _, address := range svcConfig.Destination.Addresses {
            name := clusterNameForDestination(cfgSnap, uid.Name, address, uid.NamespaceOrDefault(), uid.PartitionOrDefault())

            endpoints, ok := cfgSnap.ConnectProxy.DestinationGateways.Get(uid)
            if ok {
                la := makeLoadAssignment(
                    name,
                    []loadAssignmentEndpointGroup{
                        {Endpoints: endpoints},
                    },
                    proxycfg.GatewayKey{ /*empty so it never matches*/ },
                )
                resources = append(resources, la)
            }
        }

        return true
    })

    return resources, nil
}

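// filterSubsetEndpoints evaluates the subset's boolean filter expression (bexpr)
// locally against the given endpoints and returns only the matching instances.
// An empty filter returns the endpoints unchanged.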
func (s *ResourceGenerator) filterSubsetEndpoints(subset *structs.ServiceResolverSubset, endpoints structs.CheckServiceNodes) (structs.CheckServiceNodes, error) {
    // locally execute the subsets filter
    if subset.Filter != "" {
        filter, err := bexpr.CreateFilter(subset.Filter, nil, endpoints)
        if err != nil {
            return nil, err
        }

        raw, err := filter.Execute(endpoints)
        if err != nil {
            return nil, err
        }
        return raw.(structs.CheckServiceNodes), nil
    }
    return endpoints, nil
}

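// endpointsFromSnapshotTerminatingGateway returns the xDS endpoints for a
// terminating gateway: one load assignment per linked service and subset,
// built from the gateway's watched service groups and resolvers.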
func (s *ResourceGenerator) endpointsFromSnapshotTerminatingGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    return s.endpointsFromServicesAndResolvers(cfgSnap, cfgSnap.TerminatingGateway.ServiceGroups, cfgSnap.TerminatingGateway.ServiceResolvers)
}

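// endpointsFromSnapshotMeshGateway returns the xDS endpoints for a mesh gateway:
// remote gateways in other datacenters/partitions, local servers for WAN
// federation and peering control-plane traffic, local service instances, and
// exported/imported (peered) services.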
func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    keys := cfgSnap.MeshGateway.GatewayKeys()

    // Allocation count (this is a lower bound - all subset specific clusters will be appended):
    // 1 cluster per remote dc/partition
    // 1 cluster per local service
    // 1 cluster per unique peer server (control plane traffic)
    resources := make([]proto.Message, 0, len(keys)+len(cfgSnap.MeshGateway.ServiceGroups)+len(cfgSnap.MeshGateway.PeerServers))

    for _, key := range keys {
        if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()) {
            continue // skip local
        }
        // Also skip gateways with a hostname as their address. EDS cannot resolve hostnames,
        // so we provide them through CDS instead.
        if len(cfgSnap.MeshGateway.HostnameDatacenters[key.String()]) > 0 {
            continue
        }

        endpoints := cfgSnap.GetMeshGatewayEndpoints(key)
        if len(endpoints) == 0 {
            s.Logger.Error("skipping mesh gateway endpoints because no definition found", "datacenter", key)
            continue
        }

        { // standard connect
            clusterName := connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain)

            la := makeLoadAssignment(
                clusterName,
                []loadAssignmentEndpointGroup{
                    {Endpoints: endpoints},
                },
                cfgSnap.Locality,
            )
            resources = append(resources, la)
        }

        if cfgSnap.ProxyID.InDefaultPartition() &&
            cfgSnap.ServiceMeta[structs.MetaWANFederationKey] == "1" &&
            cfgSnap.ServerSNIFn != nil {

            clusterName := cfgSnap.ServerSNIFn(key.Datacenter, "")
            la := makeLoadAssignment(
                clusterName,
                []loadAssignmentEndpointGroup{
                    {Endpoints: endpoints},
                },
                cfgSnap.Locality,
            )
            resources = append(resources, la)
        }
    }

    // generate endpoints for our servers if WAN federation is enabled
    if cfgSnap.ProxyID.InDefaultPartition() &&
        cfgSnap.ServiceMeta[structs.MetaWANFederationKey] == "1" &&
        cfgSnap.ServerSNIFn != nil {
        var allServersLbEndpoints []*envoy_endpoint_v3.LbEndpoint

        servers, _ := cfgSnap.MeshGateway.WatchedLocalServers.Get(structs.ConsulServiceName)
        for _, srv := range servers {
            clusterName := cfgSnap.ServerSNIFn(cfgSnap.Datacenter, srv.Node.Node)

            _, addr, port := srv.BestAddress(false /*wan*/)

            lbEndpoint := &envoy_endpoint_v3.LbEndpoint{
                HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
                    Endpoint: &envoy_endpoint_v3.Endpoint{
                        Address: makeAddress(addr, port),
                    },
                },
                HealthStatus: envoy_core_v3.HealthStatus_UNKNOWN,
            }

            cla := &envoy_endpoint_v3.ClusterLoadAssignment{
                ClusterName: clusterName,
                Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{{
                    LbEndpoints: []*envoy_endpoint_v3.LbEndpoint{lbEndpoint},
                }},
            }
            allServersLbEndpoints = append(allServersLbEndpoints, lbEndpoint)

            resources = append(resources, cla)
        }

        // And add one catch all so that remote datacenters can dial ANY server
        // in this datacenter without knowing its name.
        resources = append(resources, &envoy_endpoint_v3.ClusterLoadAssignment{
            ClusterName: cfgSnap.ServerSNIFn(cfgSnap.Datacenter, ""),
            Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{{
                LbEndpoints: allServersLbEndpoints,
            }},
        })
    }

    // Create endpoints for the cluster where local servers will be dialed by peers.
    // When peering through gateways we load balance across the local servers. They cannot be addressed individually.
    if cfg := cfgSnap.MeshConfig(); cfg.PeerThroughMeshGateways() {
        var serverEndpoints []*envoy_endpoint_v3.LbEndpoint

        servers, _ := cfgSnap.MeshGateway.WatchedLocalServers.Get(structs.ConsulServiceName)
        for _, srv := range servers {
            if isReplica := srv.Service.Meta["read_replica"]; isReplica == "true" {
                // Peering control-plane traffic can only ever be handled by the local leader.
                // We avoid routing to read replicas since they will never be Raft voters.
                continue
            }

            _, addr, _ := srv.BestAddress(false)
            portStr, ok := srv.Service.Meta["grpc_tls_port"]
            if !ok {
                s.Logger.Warn("peering is enabled but local server does not have the required gRPC TLS port configured",
                    "server", srv.Node.Node)
                continue
            }
            port, err := strconv.Atoi(portStr)
            if err != nil {
                s.Logger.Error("peering is enabled but local server has invalid gRPC TLS port",
                    "server", srv.Node.Node, "port", portStr, "error", err)
                continue
            }

            serverEndpoints = append(serverEndpoints, &envoy_endpoint_v3.LbEndpoint{
                HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
                    Endpoint: &envoy_endpoint_v3.Endpoint{
                        Address: makeAddress(addr, port),
                    },
                },
            })
        }
        if len(serverEndpoints) > 0 {
            resources = append(resources, &envoy_endpoint_v3.ClusterLoadAssignment{
                ClusterName: connect.PeeringServerSAN(cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain),
                Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{{
                    LbEndpoints: serverEndpoints,
                }},
            })
        }
    }

    // Generate the endpoints for each service and its subsets
    e, err := s.endpointsFromServicesAndResolvers(cfgSnap, cfgSnap.MeshGateway.ServiceGroups, cfgSnap.MeshGateway.ServiceResolvers)
    if err != nil {
        return nil, err
    }
    resources = append(resources, e...)

    // Generate the endpoints for exported discovery chain targets.
    e, err = s.makeExportedUpstreamEndpointsForMeshGateway(cfgSnap)
    if err != nil {
        return nil, err
    }
    resources = append(resources, e...)

    // generate the outgoing endpoints for imported peer services.
    e, err = s.makeEndpointsForOutgoingPeeredServices(cfgSnap)
    if err != nil {
        return nil, err
    }
    resources = append(resources, e...)

    // Generate the endpoints for peer server control planes.
    e, err = s.makePeerServerEndpointsForMeshGateway(cfgSnap)
    if err != nil {
        return nil, err
    }
    resources = append(resources, e...)

    return resources, nil
}

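// endpointsFromServicesAndResolvers builds one load assignment per service and
// per resolver-defined subset. The default/unnamed subset is keyed by
// UnnamedSubset and may be overridden by a resolver's DefaultSubset.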
func (s *ResourceGenerator) endpointsFromServicesAndResolvers(
    cfgSnap *proxycfg.ConfigSnapshot,
    services map[structs.ServiceName]structs.CheckServiceNodes,
    resolvers map[structs.ServiceName]*structs.ServiceResolverConfigEntry,
) ([]proto.Message, error) {
    resources := make([]proto.Message, 0, len(services))

    // generate the endpoints for the linked service groups
    for svc, endpoints := range services {
        // Skip creating endpoints for services that have hostnames as addresses
        // EDS cannot resolve hostnames so we provide them through CDS instead
        if cfgSnap.Kind == structs.ServiceKindTerminatingGateway && len(cfgSnap.TerminatingGateway.HostnameServices[svc]) > 0 {
            continue
        }

        clusterEndpoints := make(map[string][]loadAssignmentEndpointGroup)
        clusterEndpoints[UnnamedSubset] = []loadAssignmentEndpointGroup{{Endpoints: endpoints, OnlyPassing: false}}

        // Collect all of the loadAssignmentEndpointGroups for the various subsets. We do this before generating
        // the endpoints for the default/unnamed subset so that we can take into account the DefaultSubset on the
        // service-resolver which may prevent the default/unnamed cluster from creating endpoints for all service
        // instances.
        if resolver, hasResolver := resolvers[svc]; hasResolver {
            for subsetName, subset := range resolver.Subsets {
                subsetEndpoints, err := s.filterSubsetEndpoints(&subset, endpoints)
                if err != nil {
                    return nil, err
                }
                groups := []loadAssignmentEndpointGroup{{Endpoints: subsetEndpoints, OnlyPassing: subset.OnlyPassing}}
                clusterEndpoints[subsetName] = groups

                // if this subset is the default then override the unnamed subset with this configuration
                if subsetName == resolver.DefaultSubset {
                    clusterEndpoints[UnnamedSubset] = groups
                }
            }
        }

        // now generate the load assignment for all subsets
        for subsetName, groups := range clusterEndpoints {
            clusterName := connect.ServiceSNI(svc.Name, subsetName, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
            la := makeLoadAssignment(
                clusterName,
                groups,
                cfgSnap.Locality,
            )
            resources = append(resources, la)
        }
    }

    return resources, nil
}

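// makeEndpointsForOutgoingPeeredServices generates endpoints for services that
// were imported from cluster peers and are dialed out through this mesh gateway.
// Clusters are named by the SNI advertised by the accepting peer.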
func (s *ResourceGenerator) makeEndpointsForOutgoingPeeredServices(
    cfgSnap *proxycfg.ConfigSnapshot,
) ([]proto.Message, error) {
    var resources []proto.Message

    // generate the endpoints for the linked service groups
    for _, serviceGroups := range cfgSnap.MeshGateway.PeeringServices {
        for sn, serviceGroup := range serviceGroups {
            if serviceGroup.UseCDS || len(serviceGroup.Nodes) == 0 {
                continue
            }

            node := serviceGroup.Nodes[0]
            if node.Service == nil {
                return nil, fmt.Errorf("couldn't get SNI for peered service %s", sn.String())
            }
            // This uses the SNI in the accepting cluster peer so the remote mesh
            // gateway can distinguish an exported service from the usual mesh
            // gateway route for a service.
            clusterName := node.Service.Connect.PeerMeta.PrimarySNI()

            groups := []loadAssignmentEndpointGroup{{Endpoints: serviceGroup.Nodes, OnlyPassing: false}}

            la := makeLoadAssignment(
                clusterName,
                groups,
                cfgSnap.Locality,
            )
            resources = append(resources, la)
        }
    }

    return resources, nil
}

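// makePeerServerEndpointsForMeshGateway generates one load assignment per peer
// server cluster that is dialed by address rather than discovered via CDS.
// Cluster names are already in SNI form, for example (illustrative only)
// "server.dc2.peering.11111111-2222-3333-4444-555555555555.consul".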
func (s *ResourceGenerator) makePeerServerEndpointsForMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    resources := make([]proto.Message, 0, len(cfgSnap.MeshGateway.PeerServers))

    // Peer server names are assumed to already be formatted in SNI notation:
    // server.<datacenter>.peering.<trust-domain>
    for name, servers := range cfgSnap.MeshGateway.PeerServers {
        if servers.UseCDS || len(servers.Addresses) == 0 {
            continue
        }

        es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(servers.Addresses))

        for _, address := range servers.Addresses {
            es = append(es, makeEndpoint(address.Address, address.Port))
        }

        cla := &envoy_endpoint_v3.ClusterLoadAssignment{
            ClusterName: name,
            Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{
                {
                    LbEndpoints: es,
                },
            },
        }

        resources = append(resources, cla)
    }

    return resources, nil
}

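// endpointsFromSnapshotIngressGateway returns the xDS endpoints for every
// discovery chain referenced by the ingress gateway's listeners, deduplicating
// upstreams that appear on more than one listener.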
func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    var resources []proto.Message
    createdClusters := make(map[proxycfg.UpstreamID]bool)
    for _, upstreams := range cfgSnap.IngressGateway.Upstreams {
        for _, u := range upstreams {
            uid := proxycfg.NewUpstreamID(&u)

            // If we've already created endpoints for this upstream, skip it. Multiple listeners may
            // reference the same upstream, so we don't need to create duplicate endpoints in that case.
            if createdClusters[uid] {
                continue
            }

            es, err := s.endpointsFromDiscoveryChain(
                uid,
                cfgSnap.IngressGateway.DiscoveryChain[uid],
                cfgSnap,
                proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
                u.Config,
                cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
                cfgSnap.IngressGateway.WatchedGatewayEndpoints[uid],
                false,
            )
            if err != nil {
                return nil, err
            }
            resources = append(resources, es...)
            createdClusters[uid] = true
        }
    }
    return resources, nil
}

// used in clusters.go
func makeEndpoint(host string, port int) *envoy_endpoint_v3.LbEndpoint {
    return &envoy_endpoint_v3.LbEndpoint{
        HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
            Endpoint: &envoy_endpoint_v3.Endpoint{
                Address: makeAddress(host, port),
            },
        },
    }
}

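// makePipeEndpoint is like makeEndpoint but addresses the endpoint by a unix
// domain socket path instead of a host and port.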
func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
    return &envoy_endpoint_v3.LbEndpoint{
        HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
            Endpoint: &envoy_endpoint_v3.Endpoint{
                Address: makePipeAddress(path, 0),
            },
        },
    }
}

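// makeUpstreamLoadAssignmentForPeerService builds the load assignment for a
// peered upstream. Depending on configuration it points either at the local
// mesh gateway (mesh gateway mode "local") or directly at the imported peer
// endpoints; it returns nil without error while the snapshot data is not yet
// available, or when the peer endpoints are addressed by hostname (handled via CDS).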
func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
    var la *envoy_endpoint_v3.ClusterLoadAssignment

    upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
    if err != nil {
        return la, err
    }

    upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

    // If an upstream is configured with local mesh gw mode, we make a load assignment
    // from the gateway endpoints instead of those of the upstreams.
    if upstream != nil && upstream.MeshGateway.Mode == structs.MeshGatewayModeLocal {
        localGw, ok := cfgSnap.ConnectProxy.WatchedLocalGWEndpoints.Get(cfgSnap.Locality.String())
        if !ok {
            // local GW is not ready; return early
            return la, nil
        }
        la = makeLoadAssignment(
            clusterName,
            []loadAssignmentEndpointGroup{
                {Endpoints: localGw},
            },
            cfgSnap.Locality,
        )
        return la, nil
    }

    // Also skip peer instances with a hostname as their address. EDS
    // cannot resolve hostnames, so we provide them through CDS instead.
    if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
        return la, nil
    }

    endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
    if !ok {
        return nil, nil
    }
    la = makeLoadAssignment(
        clusterName,
        []loadAssignmentEndpointGroup{
            {Endpoints: endpoints},
        },
        proxycfg.GatewayKey{ /*empty so it never matches*/ },
    )
    return la, nil
}

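// endpointsFromDiscoveryChain builds one ClusterLoadAssignment per resolver
// target (including failover targets) of the compiled discovery chain for uid.
// When forMeshGateway is true the endpoints are generated for exported services
// served by a mesh gateway rather than for a sidecar upstream.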
func (s *ResourceGenerator) endpointsFromDiscoveryChain(
    uid proxycfg.UpstreamID,
    chain *structs.CompiledDiscoveryChain,
    cfgSnap *proxycfg.ConfigSnapshot,
    gatewayKey proxycfg.GatewayKey,
    upstreamConfigMap map[string]interface{},
    upstreamEndpoints map[string]structs.CheckServiceNodes,
    gatewayEndpoints map[string]structs.CheckServiceNodes,
    forMeshGateway bool,
) ([]proto.Message, error) {
    if chain == nil {
        if forMeshGateway {
            return nil, fmt.Errorf("missing discovery chain for %s", uid)
        }
        return nil, nil
    }

    if upstreamConfigMap == nil {
        upstreamConfigMap = make(map[string]interface{}) // TODO: needed?
    }

    upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()

    // Mesh gateways are exempt because upstreamsSnapshot is only used for
    // cluster peering targets and transitive failover/redirects are unsupported.
    if err != nil && !forMeshGateway {
        return nil, err
    }

    var resources []proto.Message

    var escapeHatchCluster *envoy_cluster_v3.Cluster
    if !forMeshGateway {
        cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
        if err != nil {
            // Don't hard fail on a config typo, just warn. The parse func returns
            // default config if there is an error so it's safe to continue.
            s.Logger.Warn("failed to parse", "upstream", uid,
                "error", err)
        }

        if cfg.EnvoyClusterJSON != "" {
            if chain.Default {
                // If you haven't done anything to set up the discovery chain, then
                // you can use the envoy_cluster_json escape hatch.
                escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
                if err != nil {
                    return resources, nil
                }
            } else {
                s.Logger.Warn("ignoring escape hatch setting, because a discovery chain is configured for",
                    "discovery chain", chain.ServiceName, "upstream", uid,
                    "envoy_cluster_json", chain.ServiceName)
            }
        }
    }

    // Find all resolver nodes.
    for _, node := range chain.Nodes {
        if node.Type != structs.DiscoveryGraphNodeTypeResolver {
            continue
        }
        primaryTargetID := node.Resolver.Target
        failover := node.Resolver.Failover

        var targetsClustersData []targetClusterData

        var numFailoverTargets int
        if failover != nil {
            numFailoverTargets = len(failover.Targets)
        }
        if numFailoverTargets > 0 && !forMeshGateway {
            for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) {
                targetData, ok := s.getTargetClusterData(upstreamsSnapshot, chain, targetID, forMeshGateway, true)
                if !ok {
                    continue
                }
                if escapeHatchCluster != nil {
                    targetData.clusterName = escapeHatchCluster.Name
                }

                targetsClustersData = append(targetsClustersData, targetData)
            }
        } else {
            if td, ok := s.getTargetClusterData(upstreamsSnapshot, chain, primaryTargetID, forMeshGateway, false); ok {
                if escapeHatchCluster != nil {
                    td.clusterName = escapeHatchCluster.Name
                }
                targetsClustersData = append(targetsClustersData, td)
            }
        }

        for _, targetOpt := range targetsClustersData {
            s.Logger.Debug("generating endpoints for", "cluster", targetOpt.clusterName)
            targetUID := proxycfg.NewUpstreamIDFromTargetID(targetOpt.targetID)
            if targetUID.Peer != "" {
                loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetOpt.clusterName, targetUID)
                if err != nil {
                    return nil, err
                }
                if loadAssignment != nil {
                    resources = append(resources, loadAssignment)
                }
                continue
            }

            endpointGroup, valid := makeLoadAssignmentEndpointGroup(
                chain.Targets,
                upstreamEndpoints,
                gatewayEndpoints,
                targetOpt.targetID,
                gatewayKey,
                forMeshGateway,
            )
            if !valid {
                continue // skip the cluster if we're still populating the snapshot
            }

            la := makeLoadAssignment(
                targetOpt.clusterName,
                []loadAssignmentEndpointGroup{endpointGroup},
                gatewayKey,
            )
            resources = append(resources, la)
        }
    }

    return resources, nil
}

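// makeExportedUpstreamEndpointsForMeshGateway generates endpoints for discovery
// chain targets of services exported to peers, restricted to targets in the
// gateway's own datacenter and partition.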
func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
    var resources []proto.Message

    populatedExportedClusters := make(map[string]struct{}) // key=clusterName
    for _, svc := range cfgSnap.MeshGatewayValidExportedServices() {
        chain := cfgSnap.MeshGateway.DiscoveryChain[svc]

        chainEndpoints := make(map[string]structs.CheckServiceNodes)
        for _, target := range chain.Targets {
            if !cfgSnap.Locality.Matches(target.Datacenter, target.Partition) {
                s.Logger.Warn("ignoring discovery chain target that crosses a datacenter or partition boundary in a mesh gateway",
                    "target", target,
                    "gatewayLocality", cfgSnap.Locality,
                )
                continue
            }

            targetSvc := target.ServiceName()

            endpoints, ok := cfgSnap.MeshGateway.ServiceGroups[targetSvc]
            if !ok {
                continue // ignore; not ready
            }

            if target.ServiceSubset == "" {
                chainEndpoints[target.ID] = endpoints
            } else {
                resolver, ok := cfgSnap.MeshGateway.ServiceResolvers[targetSvc]
                if !ok {
                    continue // ignore; not ready
                }
                subset, ok := resolver.Subsets[target.ServiceSubset]
                if !ok {
                    continue // ignore; not ready
                }

                subsetEndpoints, err := s.filterSubsetEndpoints(&subset, endpoints)
                if err != nil {
                    return nil, err
                }
                chainEndpoints[target.ID] = subsetEndpoints
            }
        }

        clusterEndpoints, err := s.endpointsFromDiscoveryChain(
            proxycfg.NewUpstreamIDFromServiceName(svc),
            chain,
            cfgSnap,
            cfgSnap.Locality,
            nil,
            chainEndpoints,
            nil,
            true,
        )
        if err != nil {
            return nil, err
        }
        for _, endpoints := range clusterEndpoints {
            clusterName := getResourceName(endpoints)
            if _, ok := populatedExportedClusters[clusterName]; ok {
                continue
            }
            populatedExportedClusters[clusterName] = struct{}{}
            resources = append(resources, endpoints)
        }
    }
    return resources, nil
}

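// loadAssignmentEndpointGroup is one priority tier of endpoints for a cluster.
// OnlyPassing causes non-passing instances to be marked unhealthy, and
// OverrideHealth (when not UNKNOWN) replaces the per-instance health, which is
// used when the published endpoints are mesh gateways standing in for the
// real service instances.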
type loadAssignmentEndpointGroup struct {
    Endpoints      structs.CheckServiceNodes
    OnlyPassing    bool
    OverrideHealth envoy_core_v3.HealthStatus
}

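// makeLoadAssignment converts the ordered endpoint groups into a
// ClusterLoadAssignment, one LocalityLbEndpoints entry per group with its index
// as the Envoy priority, so traffic only spills over to group N+1 once group N
// has no healthy hosts. Note (assuming standard Envoy priority semantics): with
// the huge overprovisioning factor set below, a priority level keeps 100% of
// the traffic while even a single host in it is healthy, since e.g.
// 1% healthy * 100000/100 is capped at 100%.
// localKey identifies the local datacenter/partition so that WAN addresses are
// used for endpoints that live elsewhere.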
func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment {
    cla := &envoy_endpoint_v3.ClusterLoadAssignment{
        ClusterName: clusterName,
        Endpoints:   make([]*envoy_endpoint_v3.LocalityLbEndpoints, 0, len(endpointGroups)),
    }

    if len(endpointGroups) > 1 {
        cla.Policy = &envoy_endpoint_v3.ClusterLoadAssignment_Policy{
            // We choose such a large value here that the failover math should
            // in effect not happen until zero instances are healthy.
            OverprovisioningFactor: makeUint32Value(100000),
        }
    }

    for priority, endpointGroup := range endpointGroups {
        endpoints := endpointGroup.Endpoints
        es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpoints))

        for _, ep := range endpoints {
            // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc?
            _, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault()))
            healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing)

            if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN {
                healthStatus = endpointGroup.OverrideHealth
            }

            endpoint := &envoy_endpoint_v3.Endpoint{
                Address: makeAddress(addr, port),
            }
            es = append(es, &envoy_endpoint_v3.LbEndpoint{
                HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
                    Endpoint: endpoint,
                },
                HealthStatus:        healthStatus,
                LoadBalancingWeight: makeUint32Value(weight),
            })
        }

        cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{
            Priority:    uint32(priority),
            LbEndpoints: es,
        })
    }

    return cla
}

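// makeLoadAssignmentEndpointGroup selects the endpoints for one discovery chain
// target: the target's own instances when it is local (or when building for a
// mesh gateway), otherwise the relevant mesh gateway's endpoints, with the
// group's health overridden by whether any real backend instance is healthy.
// The second return value is false while the snapshot is still being populated.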
func makeLoadAssignmentEndpointGroup(
    targets map[string]*structs.DiscoveryTarget,
    targetHealth map[string]structs.CheckServiceNodes,
    gatewayHealth map[string]structs.CheckServiceNodes,
    targetID string,
    localKey proxycfg.GatewayKey,
    forMeshGateway bool,
) (loadAssignmentEndpointGroup, bool) {
    realEndpoints, ok := targetHealth[targetID]
    if !ok {
        // skip the cluster if we're still populating the snapshot
        return loadAssignmentEndpointGroup{}, false
    }
    target := targets[targetID]

    var gatewayKey proxycfg.GatewayKey

    switch target.MeshGateway.Mode {
    case structs.MeshGatewayModeRemote:
        gatewayKey.Datacenter = target.Datacenter
        gatewayKey.Partition = target.Partition
    case structs.MeshGatewayModeLocal:
        gatewayKey = localKey
    }

    if forMeshGateway || gatewayKey.IsEmpty() || localKey.Matches(target.Datacenter, target.Partition) {
        // Gateways are not needed if the request isn't for a remote DC or partition.
        return loadAssignmentEndpointGroup{
            Endpoints:   realEndpoints,
            OnlyPassing: target.Subset.OnlyPassing,
        }, true
    }

    // If using a mesh gateway we need to pull those endpoints instead.
    gatewayEndpoints, ok := gatewayHealth[gatewayKey.String()]
    if !ok {
        // skip the cluster if we're still populating the snapshot
        return loadAssignmentEndpointGroup{}, false
    }

    // But we will use the health from the actual backend service.
    overallHealth := envoy_core_v3.HealthStatus_UNHEALTHY
    for _, ep := range realEndpoints {
        health, _ := calculateEndpointHealthAndWeight(ep, target.Subset.OnlyPassing)
        if health == envoy_core_v3.HealthStatus_HEALTHY {
            overallHealth = envoy_core_v3.HealthStatus_HEALTHY
            break
        }
    }

    return loadAssignmentEndpointGroup{
        Endpoints:      gatewayEndpoints,
        OverrideHealth: overallHealth,
    }, true
}

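// calculateEndpointHealthAndWeight maps Consul health checks onto an Envoy
// health status and load-balancing weight, clamping the weight to the range
// Envoy accepts (at least 1; capped here at 128).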
func calculateEndpointHealthAndWeight(
    ep structs.CheckServiceNode,
    onlyPassing bool,
) (envoy_core_v3.HealthStatus, int) {
    healthStatus := envoy_core_v3.HealthStatus_HEALTHY
    weight := 1
    if ep.Service.Weights != nil {
        weight = ep.Service.Weights.Passing
    }

    for _, chk := range ep.Checks {
        if chk.Status == api.HealthCritical {
            healthStatus = envoy_core_v3.HealthStatus_UNHEALTHY
        }
        if onlyPassing && chk.Status != api.HealthPassing {
            healthStatus = envoy_core_v3.HealthStatus_UNHEALTHY
        }
        if chk.Status == api.HealthWarning && ep.Service.Weights != nil {
            weight = ep.Service.Weights.Warning
        }
    }
    // Make weights fit Envoy's limits. A zero weight means that either the Warning
    // (likely) or Passing (weirdly) weight has been set to 0, effectively marking
    // this instance as unhealthy; it should not be sent traffic.
    if weight < 1 {
        healthStatus = envoy_core_v3.HealthStatus_UNHEALTHY
        weight = 1
    }
    if weight > 128 {
        weight = 128
    }
    return healthStatus, weight
}