2020-12-24 19:11:13 +00:00
|
|
|
package proxycfg
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/mitchellh/mapstructure"
|
|
|
|
|
|
|
|
"github.com/hashicorp/consul/agent/cache"
|
|
|
|
"github.com/hashicorp/consul/agent/connect"
|
|
|
|
|
|
|
|
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
|
|
|
)
|
|
|
|
|
|
|
|
// handlerUpstreams bundles the shared handlerState for upstream-watch
// handling. Both proxy kinds that have upstreams route their cache updates
// through it: handleUpdateUpstreams switches on snap.Kind to pick the
// connect-proxy or ingress-gateway portion of the snapshot to mutate.
type handlerUpstreams struct {
	handlerState
}
|
|
|
|
|
|
|
|
func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
|
|
|
|
if u.Err != nil {
|
|
|
|
return fmt.Errorf("error filling agent cache: %v", u.Err)
|
|
|
|
}
|
|
|
|
|
|
|
|
upstreamsSnapshot := &snap.ConnectProxy.ConfigSnapshotUpstreams
|
|
|
|
if snap.Kind == structs.ServiceKindIngressGateway {
|
|
|
|
upstreamsSnapshot = &snap.IngressGateway.ConfigSnapshotUpstreams
|
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case u.CorrelationID == leafWatchID:
|
|
|
|
leaf, ok := u.Result.(*structs.IssuedCert)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
upstreamsSnapshot.Leaf = leaf
|
|
|
|
|
|
|
|
case strings.HasPrefix(u.CorrelationID, "discovery-chain:"):
|
|
|
|
resp, ok := u.Result.(*structs.DiscoveryChainResponse)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
uidString := strings.TrimPrefix(u.CorrelationID, "discovery-chain:")
|
|
|
|
uid := UpstreamIDFromString(uidString)
|
2021-12-13 22:44:22 +00:00
|
|
|
|
2021-12-13 23:40:04 +00:00
|
|
|
switch snap.Kind {
|
|
|
|
case structs.ServiceKindIngressGateway:
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := snap.IngressGateway.UpstreamsSet[uid]; !ok {
|
2021-12-14 01:07:56 +00:00
|
|
|
// Discovery chain is not associated with a known explicit or implicit upstream so it is purged/skipped.
|
2021-12-13 23:40:04 +00:00
|
|
|
// The associated watch was likely cancelled.
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(upstreamsSnapshot.DiscoveryChain, uid)
|
|
|
|
s.logger.Trace("discovery-chain watch fired for unknown upstream", "upstream", uid)
|
2021-12-13 23:40:04 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
case structs.ServiceKindConnectProxy:
|
2022-01-20 16:12:04 +00:00
|
|
|
explicit := snap.ConnectProxy.UpstreamConfig[uid].HasLocalPortOrSocket()
|
|
|
|
if _, implicit := snap.ConnectProxy.IntentionUpstreams[uid]; !implicit && !explicit {
|
2021-12-14 01:07:56 +00:00
|
|
|
// Discovery chain is not associated with a known explicit or implicit upstream so it is purged/skipped.
|
2021-12-13 23:40:04 +00:00
|
|
|
// The associated watch was likely cancelled.
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(upstreamsSnapshot.DiscoveryChain, uid)
|
|
|
|
s.logger.Trace("discovery-chain watch fired for unknown upstream", "upstream", uid)
|
2021-12-13 23:40:04 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("discovery-chain watch fired for unsupported kind: %s", snap.Kind)
|
2021-12-13 22:44:22 +00:00
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamsSnapshot.DiscoveryChain[uid] = resp.Chain
|
2020-12-24 19:11:13 +00:00
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
if err := s.resetWatchesFromChain(ctx, uid, resp.Chain, upstreamsSnapshot); err != nil {
|
2020-12-24 19:11:13 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
|
|
|
|
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
correlationID := strings.TrimPrefix(u.CorrelationID, "upstream-target:")
|
2022-01-20 16:12:04 +00:00
|
|
|
targetID, uidString, ok := removeColonPrefix(correlationID)
|
2020-12-24 19:11:13 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid correlation id %q", u.CorrelationID)
|
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
uid := UpstreamIDFromString(uidString)
|
|
|
|
|
|
|
|
if _, ok := upstreamsSnapshot.WatchedUpstreamEndpoints[uid]; !ok {
|
|
|
|
upstreamsSnapshot.WatchedUpstreamEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamsSnapshot.WatchedUpstreamEndpoints[uid][targetID] = resp.Nodes
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
var passthroughAddrs map[string]ServicePassthroughAddrs
|
|
|
|
|
|
|
|
for _, node := range resp.Nodes {
|
|
|
|
if snap.Proxy.Mode == structs.ProxyModeTransparent && node.Service.Proxy.TransparentProxy.DialedDirectly {
|
|
|
|
if passthroughAddrs == nil {
|
|
|
|
passthroughAddrs = make(map[string]ServicePassthroughAddrs)
|
|
|
|
}
|
|
|
|
|
|
|
|
svc := node.Service.CompoundServiceName()
|
|
|
|
|
|
|
|
// Overwrite the name if it's a connect proxy (as opposed to Connect native).
|
|
|
|
// We don't reference the proxy name directly for things like SNI, but rather the name
|
|
|
|
// of the destination. The enterprise meta of a proxy will always be the same as that of
|
|
|
|
// the destination service, so that remains intact.
|
|
|
|
if node.Service.Kind == structs.ServiceKindConnectProxy {
|
|
|
|
dst := node.Service.Proxy.DestinationServiceName
|
|
|
|
if dst == "" {
|
|
|
|
dst = node.Service.Proxy.DestinationServiceID
|
|
|
|
}
|
|
|
|
svc.Name = dst
|
|
|
|
}
|
|
|
|
|
2021-09-01 14:35:39 +00:00
|
|
|
sni := connect.ServiceSNI(svc.Name, "", svc.NamespaceOrDefault(), svc.PartitionOrDefault(), snap.Datacenter, snap.Roots.TrustDomain)
|
2020-12-24 19:11:13 +00:00
|
|
|
|
2021-06-30 16:16:33 +00:00
|
|
|
spiffeID := connect.SpiffeIDService{
|
|
|
|
Host: snap.Roots.TrustDomain,
|
2021-06-30 22:16:16 +00:00
|
|
|
Partition: svc.PartitionOrDefault(),
|
2021-06-30 16:16:33 +00:00
|
|
|
Namespace: svc.NamespaceOrDefault(),
|
|
|
|
Datacenter: snap.Datacenter,
|
|
|
|
Service: svc.Name,
|
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
svcUID := NewUpstreamIDFromServiceName(svc)
|
|
|
|
if _, ok := upstreamsSnapshot.PassthroughUpstreams[svcUID]; !ok {
|
|
|
|
upstreamsSnapshot.PassthroughUpstreams[svcUID] = ServicePassthroughAddrs{
|
2021-06-30 16:16:33 +00:00
|
|
|
SNI: sni,
|
|
|
|
SpiffeID: spiffeID,
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
// Stored in a set because it's possible for these to be duplicated
|
|
|
|
// when the upstream-target is targeted by multiple discovery chains.
|
|
|
|
Addrs: make(map[string]struct{}),
|
|
|
|
}
|
|
|
|
}
|
2021-10-29 00:41:48 +00:00
|
|
|
|
2021-12-13 20:34:49 +00:00
|
|
|
// Make sure to use an external address when crossing partitions.
|
|
|
|
isRemote := !structs.EqualPartitions(svc.PartitionOrDefault(), s.proxyID.PartitionOrDefault())
|
|
|
|
addr, _ := node.BestAddress(isRemote)
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamsSnapshot.PassthroughUpstreams[NewUpstreamIDFromServiceName(svc)].Addrs[addr] = struct{}{}
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
|
|
|
|
resp, ok := u.Result.(*structs.IndexedNodesWithGateways)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
correlationID := strings.TrimPrefix(u.CorrelationID, "mesh-gateway:")
|
2022-01-20 16:12:04 +00:00
|
|
|
key, uidString, ok := removeColonPrefix(correlationID)
|
2020-12-24 19:11:13 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid correlation id %q", u.CorrelationID)
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
uid := UpstreamIDFromString(uidString)
|
|
|
|
|
|
|
|
if _, ok = upstreamsSnapshot.WatchedGatewayEndpoints[uid]; !ok {
|
|
|
|
upstreamsSnapshot.WatchedGatewayEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamsSnapshot.WatchedGatewayEndpoints[uid][key] = resp.Nodes
|
2021-10-22 21:22:55 +00:00
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
default:
|
|
|
|
return fmt.Errorf("unknown correlation ID: %s", u.CorrelationID)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// removeColonPrefix splits s on its first colon, returning the text before
// the colon, the text after it, and whether a colon was present at all.
// When s contains no colon both string results are empty.
func removeColonPrefix(s string) (string, string, bool) {
	i := strings.IndexByte(s, ':')
	if i >= 0 {
		return s[:i], s[i+1:], true
	}
	return "", "", false
}
|
|
|
|
|
|
|
|
func (s *handlerUpstreams) resetWatchesFromChain(
|
|
|
|
ctx context.Context,
|
2022-01-20 16:12:04 +00:00
|
|
|
uid UpstreamID,
|
2020-12-24 19:11:13 +00:00
|
|
|
chain *structs.CompiledDiscoveryChain,
|
|
|
|
snap *ConfigSnapshotUpstreams,
|
|
|
|
) error {
|
2022-01-20 16:12:04 +00:00
|
|
|
s.logger.Trace("resetting watches for discovery chain", "id", uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
if chain == nil {
|
|
|
|
return fmt.Errorf("not possible to arrive here with no discovery chain")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Initialize relevant sub maps.
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := snap.WatchedUpstreams[uid]; !ok {
|
|
|
|
snap.WatchedUpstreams[uid] = make(map[string]context.CancelFunc)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := snap.WatchedUpstreamEndpoints[uid]; !ok {
|
|
|
|
snap.WatchedUpstreamEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := snap.WatchedGateways[uid]; !ok {
|
|
|
|
snap.WatchedGateways[uid] = make(map[string]context.CancelFunc)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := snap.WatchedGatewayEndpoints[uid]; !ok {
|
|
|
|
snap.WatchedGatewayEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// We could invalidate this selectively based on a hash of the relevant
|
|
|
|
// resolver information, but for now just reset anything about this
|
|
|
|
// upstream when the chain changes in any way.
|
|
|
|
//
|
|
|
|
// TODO(rb): content hash based add/remove
|
2022-01-20 16:12:04 +00:00
|
|
|
for targetID, cancelFn := range snap.WatchedUpstreams[uid] {
|
2020-12-24 19:11:13 +00:00
|
|
|
s.logger.Trace("stopping watch of target",
|
2022-01-20 16:12:04 +00:00
|
|
|
"upstream", uid,
|
2020-12-24 19:11:13 +00:00
|
|
|
"chain", chain.ServiceName,
|
|
|
|
"target", targetID,
|
|
|
|
)
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(snap.WatchedUpstreams[uid], targetID)
|
|
|
|
delete(snap.WatchedUpstreamEndpoints[uid], targetID)
|
2020-12-24 19:11:13 +00:00
|
|
|
cancelFn()
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
watchedChainEndpoints bool
|
|
|
|
needGateways = make(map[string]struct{})
|
|
|
|
)
|
|
|
|
|
|
|
|
chainID := chain.ID()
|
|
|
|
for _, target := range chain.Targets {
|
|
|
|
if target.ID == chainID {
|
|
|
|
watchedChainEndpoints = true
|
|
|
|
}
|
|
|
|
|
|
|
|
opts := targetWatchOpts{
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamID: uid,
|
2020-12-24 19:11:13 +00:00
|
|
|
chainID: target.ID,
|
|
|
|
service: target.Service,
|
|
|
|
filter: target.Subset.Filter,
|
|
|
|
datacenter: target.Datacenter,
|
|
|
|
entMeta: target.GetEnterpriseMetadata(),
|
|
|
|
}
|
|
|
|
err := s.watchUpstreamTarget(ctx, snap, opts)
|
|
|
|
if err != nil {
|
2022-01-20 16:12:04 +00:00
|
|
|
return fmt.Errorf("failed to watch target %q for upstream %q", target.ID, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// We'll get endpoints from the gateway query, but the health still has
|
|
|
|
// to come from the backing service query.
|
2021-10-22 21:22:55 +00:00
|
|
|
var gk GatewayKey
|
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
switch target.MeshGateway.Mode {
|
|
|
|
case structs.MeshGatewayModeRemote:
|
2021-10-22 21:22:55 +00:00
|
|
|
gk = GatewayKey{
|
|
|
|
Partition: target.Partition,
|
|
|
|
Datacenter: target.Datacenter,
|
|
|
|
}
|
2020-12-24 19:11:13 +00:00
|
|
|
case structs.MeshGatewayModeLocal:
|
2021-10-22 21:22:55 +00:00
|
|
|
gk = GatewayKey{
|
|
|
|
Partition: s.source.NodePartitionOrDefault(),
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s.source.Datacenter != target.Datacenter || s.proxyID.PartitionOrDefault() != target.Partition {
|
|
|
|
needGateways[gk.String()] = struct{}{}
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the discovery chain's targets do not lead to watching all endpoints
|
|
|
|
// for the upstream, then create a separate watch for those too.
|
|
|
|
// This is needed in transparent mode because if there is some service A that
|
|
|
|
// redirects to service B, the dialing proxy needs to associate A's virtual IP
|
|
|
|
// with A's discovery chain.
|
|
|
|
//
|
|
|
|
// Outside of transparent mode we only watch the chain target, B,
|
|
|
|
// since A is a virtual service and traffic will not be sent to it.
|
|
|
|
if !watchedChainEndpoints && s.proxyCfg.Mode == structs.ProxyModeTransparent {
|
2021-09-07 20:29:32 +00:00
|
|
|
chainEntMeta := structs.NewEnterpriseMetaWithPartition(chain.Partition, chain.Namespace)
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
opts := targetWatchOpts{
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamID: uid,
|
2020-12-24 19:11:13 +00:00
|
|
|
chainID: chainID,
|
|
|
|
service: chain.ServiceName,
|
|
|
|
filter: "",
|
|
|
|
datacenter: chain.Datacenter,
|
|
|
|
entMeta: &chainEntMeta,
|
|
|
|
}
|
|
|
|
err := s.watchUpstreamTarget(ctx, snap, opts)
|
|
|
|
if err != nil {
|
2022-01-20 16:12:04 +00:00
|
|
|
return fmt.Errorf("failed to watch target %q for upstream %q", chainID, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-22 21:22:55 +00:00
|
|
|
for key := range needGateways {
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := snap.WatchedGateways[uid][key]; ok {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2021-10-22 21:22:55 +00:00
|
|
|
gwKey := gatewayKeyFromString(key)
|
2020-12-24 19:11:13 +00:00
|
|
|
|
2021-10-22 21:22:55 +00:00
|
|
|
s.logger.Trace("initializing watch of mesh gateway",
|
2022-01-20 16:12:04 +00:00
|
|
|
"upstream", uid,
|
2020-12-24 19:11:13 +00:00
|
|
|
"chain", chain.ServiceName,
|
2021-10-22 21:22:55 +00:00
|
|
|
"datacenter", gwKey.Datacenter,
|
|
|
|
"partition", gwKey.Partition,
|
2020-12-24 19:11:13 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
2021-10-22 21:22:55 +00:00
|
|
|
opts := gatewayWatchOpts{
|
|
|
|
notifier: s.cache,
|
|
|
|
notifyCh: s.ch,
|
|
|
|
source: *s.source,
|
|
|
|
token: s.token,
|
|
|
|
key: gwKey,
|
2022-01-20 16:12:04 +00:00
|
|
|
upstreamID: uid,
|
2021-10-22 21:22:55 +00:00
|
|
|
}
|
|
|
|
err := watchMeshGateway(ctx, opts)
|
2020-12-24 19:11:13 +00:00
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.WatchedGateways[uid][key] = cancel
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
for key, cancelFn := range snap.WatchedGateways[uid] {
|
2021-10-22 21:22:55 +00:00
|
|
|
if _, ok := needGateways[key]; ok {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2021-10-22 21:22:55 +00:00
|
|
|
gwKey := gatewayKeyFromString(key)
|
|
|
|
|
|
|
|
s.logger.Trace("stopping watch of mesh gateway",
|
2022-01-20 16:12:04 +00:00
|
|
|
"upstream", uid,
|
2020-12-24 19:11:13 +00:00
|
|
|
"chain", chain.ServiceName,
|
2021-10-22 21:22:55 +00:00
|
|
|
"datacenter", gwKey.Datacenter,
|
|
|
|
"partition", gwKey.Partition,
|
2020-12-24 19:11:13 +00:00
|
|
|
)
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(snap.WatchedGateways[uid], key)
|
|
|
|
delete(snap.WatchedGatewayEndpoints[uid], key)
|
2020-12-24 19:11:13 +00:00
|
|
|
cancelFn()
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// targetWatchOpts collects the parameters needed to start a health watch on a
// single discovery-chain target (see handlerUpstreams.watchUpstreamTarget).
type targetWatchOpts struct {
	// upstreamID identifies the upstream the target belongs to; it keys the
	// snapshot's WatchedUpstreams map and is embedded in the correlation ID.
	upstreamID UpstreamID
	// chainID is the discovery chain target ID being watched.
	chainID string
	// service is the name of the backing service to health-query.
	service string
	// filter is passed through as QueryOptions.Filter on the health query.
	filter string
	// datacenter is where the health query is executed.
	datacenter string
	// entMeta carries the target's partition/namespace metadata; it is merged
	// into the request's EnterpriseMeta.
	entMeta *structs.EnterpriseMeta
}
|
|
|
|
|
|
|
|
func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
|
|
|
|
s.logger.Trace("initializing watch of target",
|
|
|
|
"upstream", opts.upstreamID,
|
|
|
|
"chain", opts.service,
|
|
|
|
"target", opts.chainID,
|
|
|
|
)
|
|
|
|
|
|
|
|
var finalMeta structs.EnterpriseMeta
|
|
|
|
finalMeta.Merge(opts.entMeta)
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String()
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
err := s.health.Notify(ctx, structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: opts.datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Token: s.token,
|
|
|
|
Filter: opts.filter,
|
|
|
|
},
|
|
|
|
ServiceName: opts.service,
|
|
|
|
Connect: true,
|
|
|
|
// Note that Identifier doesn't type-prefix for service any more as it's
|
|
|
|
// the default and makes metrics and other things much cleaner. It's
|
|
|
|
// simpler for us if we have the type to make things unambiguous.
|
|
|
|
Source: *s.source,
|
|
|
|
EnterpriseMeta: finalMeta,
|
|
|
|
}, correlationID, s.ch)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// discoveryChainWatchOpts collects the parameters for starting a compiled
// discovery chain watch for one upstream (see
// handlerUpstreams.watchDiscoveryChain).
type discoveryChainWatchOpts struct {
	// id identifies the upstream; it keys WatchedDiscoveryChains and is
	// embedded in the watch's correlation ID.
	id UpstreamID
	// name is the service name the chain is compiled for.
	name string
	// namespace, partition, and datacenter select where the chain is
	// evaluated (EvaluateIn* fields of the DiscoveryChainRequest).
	namespace  string
	partition  string
	datacenter string
	// cfg carries opaque-config overrides (protocol, connect timeout) folded
	// into the request.
	cfg reducedUpstreamConfig
	// meshGateway overrides the mesh gateway config used when compiling.
	meshGateway structs.MeshGatewayConfig
}
|
|
|
|
|
|
|
|
func (s *handlerUpstreams) watchDiscoveryChain(ctx context.Context, snap *ConfigSnapshot, opts discoveryChainWatchOpts) error {
|
|
|
|
if _, ok := snap.ConnectProxy.WatchedDiscoveryChains[opts.id]; ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
err := s.cache.Notify(ctx, cachetype.CompiledDiscoveryChainName, &structs.DiscoveryChainRequest{
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
Name: opts.name,
|
|
|
|
EvaluateInDatacenter: opts.datacenter,
|
|
|
|
EvaluateInNamespace: opts.namespace,
|
2021-09-07 20:29:32 +00:00
|
|
|
EvaluateInPartition: opts.partition,
|
2020-12-24 19:11:13 +00:00
|
|
|
OverrideProtocol: opts.cfg.Protocol,
|
|
|
|
OverrideConnectTimeout: opts.cfg.ConnectTimeout(),
|
|
|
|
OverrideMeshGateway: opts.meshGateway,
|
2022-01-20 16:12:04 +00:00
|
|
|
}, "discovery-chain:"+opts.id.String(), s.ch)
|
2020-12-24 19:11:13 +00:00
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
switch s.kind {
|
|
|
|
case structs.ServiceKindIngressGateway:
|
|
|
|
snap.IngressGateway.WatchedDiscoveryChains[opts.id] = cancel
|
|
|
|
case structs.ServiceKindConnectProxy:
|
|
|
|
snap.ConnectProxy.WatchedDiscoveryChains[opts.id] = cancel
|
|
|
|
default:
|
|
|
|
cancel()
|
|
|
|
return fmt.Errorf("unsupported kind %s", s.kind)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// reducedUpstreamConfig represents the basic opaque config values that are now
// managed with the discovery chain but for backwards compatibility reasons
// should still affect how the proxy is configured.
//
// The full-blown config is agent/xds.UpstreamConfig
type reducedUpstreamConfig struct {
	Protocol         string `mapstructure:"protocol"`
	ConnectTimeoutMs int    `mapstructure:"connect_timeout_ms"`
}

// ConnectTimeout converts the raw millisecond count from the opaque upstream
// config into a time.Duration.
func (c *reducedUpstreamConfig) ConnectTimeout() time.Duration {
	ms := c.ConnectTimeoutMs
	return time.Millisecond * time.Duration(ms)
}
|
|
|
|
|
|
|
|
func parseReducedUpstreamConfig(m map[string]interface{}) (reducedUpstreamConfig, error) {
|
|
|
|
var cfg reducedUpstreamConfig
|
|
|
|
err := mapstructure.WeakDecode(m, &cfg)
|
|
|
|
return cfg, err
|
|
|
|
}
|