package xds

import (
	"errors"
	"fmt"

	envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	envoyendpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
	"github.com/gogo/protobuf/proto"

	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
)

// endpointsFromSnapshot returns the xDS API representation of the "endpoints"
// (upstream instances) in the snapshot.
func (s *Server) endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
	if cfgSnap == nil {
		return nil, errors.New("nil config given")
	}

	switch cfgSnap.Kind {
	case structs.ServiceKindConnectProxy:
		return s.endpointsFromSnapshotConnectProxy(cfgSnap, token)
	case structs.ServiceKindMeshGateway:
		return s.endpointsFromSnapshotMeshGateway(cfgSnap, token)
	default:
		return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
	}
}

// endpointsFromSnapshotConnectProxy returns the xDS API representation of the "endpoints"
// (upstream instances) in the snapshot.
func (s *Server) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
	// TODO(rb): this sizing is a low bound.
	resources := make([]proto.Message, 0, len(cfgSnap.ConnectProxy.UpstreamEndpoints))

	// TODO(rb): should naming from 1.5 -> 1.6 for clusters remain unchanged?

	for _, u := range cfgSnap.Proxy.Upstreams {
		id := u.Identifier()

		var chain *structs.CompiledDiscoveryChain
		if u.DestinationType != structs.UpstreamDestTypePreparedQuery {
			chain = cfgSnap.ConnectProxy.DiscoveryChain[id]
		}

		if chain == nil {
			// We ONLY want this branch for prepared queries.

			endpoints, ok := cfgSnap.ConnectProxy.UpstreamEndpoints[id]
			if ok {
				la := makeLoadAssignment(
					id,
					0,
					[]structs.CheckServiceNodes{endpoints},
					cfgSnap.Datacenter,
				)
				resources = append(resources, la)
			}

		} else {
			// Newfangled discovery chain plumbing.

			chainEndpointMap, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[id]
			if !ok {
				continue // TODO(rb): whaaaa?
			}

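			// Build one load assignment per target group in the compiled
			// discovery chain; each target gets its own cluster name.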
			for target, node := range chain.GroupResolverNodes {
				groupResolver := node.GroupResolver
				failover := groupResolver.Failover

				endpoints, ok := chainEndpointMap[target]
				if !ok {
					continue // TODO(rb): whaaaa?
				}

				var (
					priorityEndpoints      []structs.CheckServiceNodes
					overprovisioningFactor int
				)

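				// priorityEndpoints is ordered: index 0 holds the primary target's
				// instances and each later entry holds a failover target's, mapping
				// to increasing Envoy priority levels in makeLoadAssignment.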
				if failover != nil && len(failover.Targets) > 0 {
					priorityEndpoints = make([]structs.CheckServiceNodes, 0, len(failover.Targets)+1)

					priorityEndpoints = append(priorityEndpoints, endpoints)

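					// Note: Envoy's own default overprovisioning factor is 140 (1.4x),
					// which begins shifting traffic to lower priorities once fewer
					// than ~71% of the hosts in a priority level are healthy.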
					if failover.Definition.OverprovisioningFactor > 0 {
						overprovisioningFactor = failover.Definition.OverprovisioningFactor
					}
					if overprovisioningFactor <= 0 {
						// We choose such a large value here that the failover math should
						// in effect not happen until zero instances are healthy.
						overprovisioningFactor = 100000
					}

					for _, failTarget := range failover.Targets {
						failEndpoints, ok := chainEndpointMap[failTarget]
						if ok {
							priorityEndpoints = append(priorityEndpoints, failEndpoints)
						}
					}
				} else {
					priorityEndpoints = []structs.CheckServiceNodes{
						endpoints,
					}
				}

				clusterName := makeClusterName(id, target, cfgSnap.Datacenter)

				la := makeLoadAssignment(
					clusterName,
					overprovisioningFactor,
					priorityEndpoints,
					cfgSnap.Datacenter,
				)
				resources = append(resources, la)
			}
		}
	}

	return resources, nil
}
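
// endpointsFromSnapshotMeshGateway returns the xDS API representation of the
// "endpoints" for a mesh gateway: one load assignment per remote datacenter's
// gateway group, plus one per local service group exposed through the gateway.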
func (s *Server) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
	resources := make([]proto.Message, 0, len(cfgSnap.MeshGateway.GatewayGroups)+len(cfgSnap.MeshGateway.ServiceGroups))

	// generate the endpoints for the gateways in the remote datacenters
	for dc, endpoints := range cfgSnap.MeshGateway.GatewayGroups {
		clusterName := DatacenterSNI(dc, cfgSnap)
		la := makeLoadAssignment(
			clusterName,
			0,
			[]structs.CheckServiceNodes{
				endpoints,
			},
			cfgSnap.Datacenter,
		)
		resources = append(resources, la)
	}

	// generate the endpoints for the local service groups
	for svc, endpoints := range cfgSnap.MeshGateway.ServiceGroups {
		clusterName := ServiceSNI(svc, "default", cfgSnap.Datacenter, cfgSnap)
		la := makeLoadAssignment(
			clusterName,
			0,
			[]structs.CheckServiceNodes{
				endpoints,
			},
			cfgSnap.Datacenter,
		)
		resources = append(resources, la)
	}

	return resources, nil
}
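
// makeEndpoint returns an Envoy LbEndpoint addressing the given host and port.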
func makeEndpoint(clusterName, host string, port int) envoyendpoint.LbEndpoint {
	return envoyendpoint.LbEndpoint{
		HostIdentifier: &envoyendpoint.LbEndpoint_Endpoint{
			Endpoint: &envoyendpoint.Endpoint{
				Address: makeAddressPtr(host, port),
			},
		},
	}
}
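
// makeLoadAssignment converts ordered sets of Consul service instances into an
// Envoy ClusterLoadAssignment. Each entry of priorityEndpoints becomes one
// Envoy priority level: priorityEndpoints[0] is priority 0 (the primary
// target) and each later entry is a progressively less preferred failover
// level.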
func makeLoadAssignment(
	clusterName string,
	overprovisioningFactor int,
	priorityEndpoints []structs.CheckServiceNodes,
	localDatacenter string,
) *envoy.ClusterLoadAssignment {
	cla := &envoy.ClusterLoadAssignment{
		ClusterName: clusterName,
		Endpoints:   make([]envoyendpoint.LocalityLbEndpoints, 0, len(priorityEndpoints)),
	}
	if overprovisioningFactor > 0 {
		cla.Policy = &envoy.ClusterLoadAssignment_Policy{
			OverprovisioningFactor: makeUint32Value(overprovisioningFactor),
		}
	}

	for priority, endpoints := range priorityEndpoints {
		es := make([]envoyendpoint.LbEndpoint, 0, len(endpoints))

		for _, ep := range endpoints {
			// TODO (mesh-gateway) - should we respect the translate_wan_addrs
			// configuration here or just always use the wan for cross-dc?
			addr, port := ep.BestAddress(localDatacenter != ep.Node.Datacenter)
			healthStatus := envoycore.HealthStatus_HEALTHY
			weight := 1
			if ep.Service.Weights != nil {
				weight = ep.Service.Weights.Passing
			}

			for _, chk := range ep.Checks {
				if chk.Status == api.HealthCritical {
					// This can't actually happen now because health always filters
					// critical, but in the future it may not, so set this correctly!
					healthStatus = envoycore.HealthStatus_UNHEALTHY
				}
				if chk.Status == api.HealthWarning && ep.Service.Weights != nil {
					weight = ep.Service.Weights.Warning
				}
			}
			// Make weights fit Envoy's limits. A zero weight means that either the
			// Warning (likely) or Passing (weirdly) weight has been set to 0,
			// effectively marking this instance unhealthy; it should not be sent
			// traffic.
			if weight < 1 {
				healthStatus = envoycore.HealthStatus_UNHEALTHY
				weight = 1
			}
			if weight > 128 {
				weight = 128
			}
			es = append(es, envoyendpoint.LbEndpoint{
				HostIdentifier: &envoyendpoint.LbEndpoint_Endpoint{
					Endpoint: &envoyendpoint.Endpoint{
						Address: makeAddressPtr(addr, port),
					},
				},
				HealthStatus:        healthStatus,
				LoadBalancingWeight: makeUint32Value(weight),
			})
		}

		cla.Endpoints = append(cla.Endpoints, envoyendpoint.LocalityLbEndpoints{
			Priority:    uint32(priority),
			LbEndpoints: es,
		})
	}

	return cla
}