package xds

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
	envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	envoy_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"

	"github.com/hashicorp/go-hclog"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"

	external "github.com/hashicorp/consul/agent/grpc-external"
	"github.com/hashicorp/consul/agent/grpc-external/limiter"
	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/xds/xdscommon"
	"github.com/hashicorp/consul/logging"
)

var errOverwhelmed = status.Error(codes.ResourceExhausted, "this server has too many xDS streams open, please try another")

type deltaRecvResponse int

const (
	deltaRecvResponseNack deltaRecvResponse = iota
	deltaRecvResponseAck
	deltaRecvNewSubscription
	deltaRecvUnknownType
)

// ADSDeltaStream is a shorter way of referring to this thing...
type ADSDeltaStream = envoy_discovery_v3.AggregatedDiscoveryService_DeltaAggregatedResourcesServer

// DeltaAggregatedResources implements envoy_discovery_v3.AggregatedDiscoveryServiceServer
func (s *Server) DeltaAggregatedResources(stream ADSDeltaStream) error {
	defer s.activeStreams.Increment(stream.Context())()

	// a channel for receiving incoming requests
	reqCh := make(chan *envoy_discovery_v3.DeltaDiscoveryRequest)
	reqStop := int32(0)
	go func() {
		for {
			req, err := stream.Recv()
			if atomic.LoadInt32(&reqStop) != 0 {
				return
			}
			if err != nil {
				s.Logger.Error("Error receiving new DeltaDiscoveryRequest; closing request channel", "error", err)
				close(reqCh)
				return
			}
			reqCh <- req
		}
	}()

	err := s.processDelta(stream, reqCh)
	if err != nil {
		s.Logger.Error("Error handling ADS delta stream", "xdsVersion", "v3", "error", err)
	}

	// prevents writing to a closed channel if send failed on blocked recv
	atomic.StoreInt32(&reqStop, 1)

	return err
}
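
// Stages of the processDelta state machine: wait for the first request so we
// know which proxy the stream is for, then wait for the initial config
// snapshot from proxycfg, then run steady-state: authorize and push
// incremental updates.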
const (
	stateDeltaInit int = iota
	stateDeltaPendingInitialConfig
	stateDeltaRunning
)
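
// processDelta handles a single incremental (delta) xDS stream: it
// authenticates the caller, watches proxycfg for config snapshots of the
// target proxy, and drives the per-type handlers that reconcile the resources
// Consul wants Envoy to have against what Envoy has ACKed so far.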
func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discovery_v3.DeltaDiscoveryRequest) error {
	// Handle invalid ACL tokens up-front.
	if _, err := s.authenticate(stream.Context()); err != nil {
		return err
	}

	// Loop state
	var (
		cfgSnap     *proxycfg.ConfigSnapshot
		node        *envoy_config_core_v3.Node
		stateCh     <-chan *proxycfg.ConfigSnapshot
		drainCh     limiter.SessionTerminatedChan
		watchCancel func()
		proxyID     structs.ServiceID
		nonce       uint64 // xDS requires a unique nonce to correlate response/request pairs
		ready       bool   // set to true after the first snapshot arrives

		streamStartTime = time.Now()
		streamStartOnce sync.Once
	)

	var (
		// resourceMap is the SoTW we are incrementally attempting to sync to envoy.
		//
		// type => name => proto
		resourceMap = xdscommon.EmptyIndexedResources()

		// currentVersions is the xDS versioning represented by Resources.
		//
		// type => name => version (as consul knows right now)
		currentVersions = make(map[string]map[string]string)
	)

	generator := newResourceGenerator(
		s.Logger.Named(logging.XDS).With("xdsVersion", "v3"),
		s.CfgFetcher,
		true,
	)

	// need to run a small state machine to get through initial authentication.
	var state = stateDeltaInit

	// Configure handlers for each type of request we currently care about.
	handlers := map[string]*xDSDeltaType{
		xdscommon.ListenerType: newDeltaType(generator, stream, xdscommon.ListenerType, func(kind structs.ServiceKind) bool {
			return cfgSnap.Kind == structs.ServiceKindIngressGateway
		}),
		xdscommon.RouteType: newDeltaType(generator, stream, xdscommon.RouteType, func(kind structs.ServiceKind) bool {
			return cfgSnap.Kind == structs.ServiceKindIngressGateway
		}),
		xdscommon.ClusterType: newDeltaType(generator, stream, xdscommon.ClusterType, func(kind structs.ServiceKind) bool {
			// Mesh, Ingress, and Terminating gateways are allowed to inform CDS of
			// no clusters.
			return cfgSnap.Kind == structs.ServiceKindMeshGateway ||
				cfgSnap.Kind == structs.ServiceKindTerminatingGateway ||
				cfgSnap.Kind == structs.ServiceKindIngressGateway
		}),
		xdscommon.EndpointType: newDeltaType(generator, stream, xdscommon.EndpointType, nil),
	}

	// Endpoints are stored within a Cluster (and Routes
	// are stored within a Listener) so whenever the
	// enclosing resource is updated the inner resource
	// list is cleared implicitly.
	//
	// When this happens we should update our local
	// representation of envoy state to force an update.
	//
	// see: https://github.com/envoyproxy/envoy/issues/13009
	handlers[xdscommon.ListenerType].deltaChild = &xDSDeltaChild{
		childType:     handlers[xdscommon.RouteType],
		childrenNames: make(map[string][]string),
	}
	handlers[xdscommon.ClusterType].deltaChild = &xDSDeltaChild{
		childType:     handlers[xdscommon.EndpointType],
		childrenNames: make(map[string][]string),
	}

	var authTimer <-chan time.Time
	extendAuthTimer := func() {
		authTimer = time.After(s.AuthCheckFrequency)
	}

	checkStreamACLs := func(cfgSnap *proxycfg.ConfigSnapshot) error {
		return s.authorize(stream.Context(), cfgSnap)
	}
	for {
		select {
		case <-drainCh:
			generator.Logger.Debug("draining stream to rebalance load")
			metrics.IncrCounter([]string{"xds", "server", "streamDrained"}, 1)
			return errOverwhelmed
		case <-authTimer:
			// It's been too long since a Discovery{Request,Response} so recheck ACLs.
			if err := checkStreamACLs(cfgSnap); err != nil {
				return err
			}
			extendAuthTimer()

		case req, ok := <-reqCh:
			if !ok {
				// reqCh is closed when stream.Recv errors which is how we detect client
				// going away. AFAICT the stream.Context() is only canceled once the
				// RPC method returns which it can't until we return from this one so
				// there's no point in blocking on that.
				return nil
			}

			generator.logTraceRequest("Incremental xDS v3", req)

			if req.TypeUrl == "" {
				return status.Errorf(codes.InvalidArgument, "type URL is required for ADS")
			}

			if node == nil && req.Node != nil {
				node = req.Node
				var err error
				generator.ProxyFeatures, err = determineSupportedProxyFeatures(req.Node)
				if err != nil {
					return status.Errorf(codes.InvalidArgument, err.Error())
				}
			}

			if handler, ok := handlers[req.TypeUrl]; ok {
				switch handler.Recv(req, generator.ProxyFeatures) {
				case deltaRecvNewSubscription:
					generator.Logger.Trace("subscribing to type", "typeUrl", req.TypeUrl)

				case deltaRecvResponseNack:
					generator.Logger.Trace("got nack response for type", "typeUrl", req.TypeUrl)

					// There is no reason to believe that generating new xDS resources from the same snapshot
					// would lead to an ACK from Envoy. Instead we continue to the top of this for loop and wait
					// for a new request or snapshot.
					continue
				}
			}

		case cs, ok := <-stateCh:
			if !ok {
				// stateCh is closed either when *we* cancel the watch (on-exit via defer)
				// or by the proxycfg.Manager when an irrecoverable error is encountered
				// such as the ACL token getting deleted.
				//
				// We know for sure that this is the latter case, because in the former we
				// would've already exited this loop.
				return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again")
			}
			cfgSnap = cs

			newRes, err := generator.allResourcesFromSnapshot(cfgSnap)
			if err != nil {
				return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err)
			}

			// index and hash the xDS structures
			newResourceMap := indexResources(generator.Logger, newRes)

			if s.ResourceMapMutateFn != nil {
				s.ResourceMapMutateFn(newResourceMap)
			}

			if err = s.applyEnvoyExtensions(newResourceMap, cfgSnap); err != nil {
				// err is already the result of calling status.Errorf
				return err
			}

			if err := populateChildIndexMap(newResourceMap); err != nil {
				return status.Errorf(codes.Unavailable, "failed to index xDS resource versions: %v", err)
			}

			newVersions, err := computeResourceVersions(newResourceMap)
			if err != nil {
				return status.Errorf(codes.Unavailable, "failed to compute xDS resource versions: %v", err)
			}

			resourceMap = newResourceMap
			currentVersions = newVersions
			ready = true
		}

		// Trigger state machine
		switch state {
		case stateDeltaInit:
			if node == nil {
				// This can't happen (tm) since stateCh is nil until after the first req
				// is received but let's not panic about it.
				continue
			}

			nodeName := node.GetMetadata().GetFields()["node_name"].GetStringValue()
			if nodeName == "" {
				nodeName = s.NodeName
			}

			// Start authentication process, we need the proxyID
			proxyID = structs.NewServiceID(node.Id, parseEnterpriseMeta(node))

			// Start watching config for that proxy
			var err error
			options, err := external.QueryOptionsFromContext(stream.Context())
			if err != nil {
				return status.Errorf(codes.Internal, "failed to watch proxy service: %s", err)
			}

			stateCh, drainCh, watchCancel, err = s.CfgSrc.Watch(proxyID, nodeName, options.Token)
			switch {
			case errors.Is(err, limiter.ErrCapacityReached):
				return errOverwhelmed
			case err != nil:
				return status.Errorf(codes.Internal, "failed to watch proxy service: %s", err)
			}
			// Note that in this case we _intend_ the defer to only be triggered when
			// this whole process method ends (i.e. when streaming RPC aborts) not at
			// the end of the current loop iteration. We have to do it in the loop
			// here since we can't start watching until we get to this state in the
			// state machine.
			defer watchCancel()

			generator.Logger = generator.Logger.With("service_id", proxyID.String()) // enhance future logs

			generator.Logger.Trace("watching proxy, pending initial proxycfg snapshot for xDS")

			// Now wait for the config so we can check ACL
			state = stateDeltaPendingInitialConfig
		case stateDeltaPendingInitialConfig:
			if cfgSnap == nil {
				// Nothing we can do until we get the initial config
				continue
			}

			// Got config, try to authenticate next.
			state = stateDeltaRunning

			// Upgrade the logger
			switch cfgSnap.Kind {
			case structs.ServiceKindConnectProxy:
			case structs.ServiceKindTerminatingGateway:
				generator.Logger = generator.Logger.Named(logging.TerminatingGateway)
			case structs.ServiceKindMeshGateway:
				generator.Logger = generator.Logger.Named(logging.MeshGateway)
			case structs.ServiceKindIngressGateway:
				generator.Logger = generator.Logger.Named(logging.IngressGateway)
			}

			generator.Logger.Trace("Got initial config snapshot")

			// Let's actually process the config we just got or we'll miss responding
			fallthrough
		case stateDeltaRunning:
			// Check ACLs on every Discovery{Request,Response}.
			if err := checkStreamACLs(cfgSnap); err != nil {
				return err
			}
			// For the first time through the state machine, this is when the
			// timer is first started.
			extendAuthTimer()

			if !ready {
				generator.Logger.Trace("Skipping delta computation because we haven't gotten a snapshot yet")
				continue
			}

			generator.Logger.Trace("Invoking all xDS resource handlers and sending changed data if there are any")

			streamStartOnce.Do(func() {
				metrics.MeasureSince([]string{"xds", "server", "streamStart"}, streamStartTime)
			})

			for _, op := range xDSUpdateOrder {
				if op.TypeUrl == xdscommon.ListenerType || op.TypeUrl == xdscommon.RouteType {
					if clusterHandler := handlers[xdscommon.ClusterType]; clusterHandler.registered && len(clusterHandler.pendingUpdates) > 0 {
						generator.Logger.Trace("Skipping delta computation for resource because there are dependent updates pending",
							"typeUrl", op.TypeUrl, "dependent", xdscommon.ClusterType)

						// Receiving an ACK from Envoy will unblock the select statement above,
						// and re-trigger an attempt to send these skipped updates.
						break
					}
					if endpointHandler := handlers[xdscommon.EndpointType]; endpointHandler.registered && len(endpointHandler.pendingUpdates) > 0 {
						generator.Logger.Trace("Skipping delta computation for resource because there are dependent updates pending",
							"typeUrl", op.TypeUrl, "dependent", xdscommon.EndpointType)

						// Receiving an ACK from Envoy will unblock the select statement above,
						// and re-trigger an attempt to send these skipped updates.
						break
					}
				}
				err, _ := handlers[op.TypeUrl].SendIfNew(
					cfgSnap.Kind,
					currentVersions[op.TypeUrl],
					resourceMap,
					&nonce,
					op.Upsert,
					op.Remove,
				)
				if err != nil {
					return status.Errorf(codes.Unavailable,
						"failed to send %sreply for type %q: %v",
						op.errorLogNameReplyPrefix(),
						op.TypeUrl, err)
				}
			}
		}
	}
}
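
// applyEnvoyExtensions runs each Envoy extension configured for this snapshot
// against the generated resources. Failures only abort the stream when the
// extension is marked Required; otherwise they are logged and skipped.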
func (s *Server) applyEnvoyExtensions(resources *xdscommon.IndexedResources, cfgSnap *proxycfg.ConfigSnapshot) error {
	var err error
	cfgs := xdscommon.GetExtensionConfigurations(cfgSnap)

	for _, extensions := range cfgs {
		for _, ext := range extensions {
			logFn := s.Logger.Warn
			if ext.EnvoyExtension.Required {
				logFn = s.Logger.Error
			}
			extensionContext := []interface{}{
				"extension", ext.EnvoyExtension.Name,
				"service", ext.ServiceName.Name,
				"namespace", ext.ServiceName.Namespace,
				"partition", ext.ServiceName.Partition,
			}

			extension, ok := GetBuiltInExtension(ext)
			if !ok {
				logFn("failed to find extension", extensionContext...)

				if ext.EnvoyExtension.Required {
					return status.Errorf(codes.Unavailable, "failed to find extension %q for service %q", ext.EnvoyExtension.Name, ext.ServiceName.Name)
				}

				continue
			}

			err = extension.Validate(ext)
			if err != nil {
				extensionContext = append(extensionContext, "error", err)
				logFn("failed to validate extension arguments", extensionContext...)
				if ext.EnvoyExtension.Required {
					return status.Errorf(codes.Unavailable, "failed to validate arguments for extension %q for service %q", ext.EnvoyExtension.Name, ext.ServiceName.Name)
				}

				continue
			}

			resources, err = extension.Extend(resources, ext)
			if err == nil {
				continue
			}

			logFn("failed to apply envoy extension", extensionContext...)
			if ext.EnvoyExtension.Required {
				return status.Errorf(codes.Unavailable, "failed to patch xDS resources in the %q plugin: %v", ext.EnvoyExtension.Name, err)
			}
		}
	}

	return nil
}

var xDSUpdateOrder = []xDSUpdateOperation{
	// 1. CDS updates (if any) must always be pushed first.
	{TypeUrl: xdscommon.ClusterType, Upsert: true},
	// 2. EDS updates (if any) must arrive after CDS updates for the respective clusters.
	{TypeUrl: xdscommon.EndpointType, Upsert: true},
	// 3. LDS updates must arrive after corresponding CDS/EDS updates.
	{TypeUrl: xdscommon.ListenerType, Upsert: true, Remove: true},
	// 4. RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates.
	{TypeUrl: xdscommon.RouteType, Upsert: true, Remove: true},
	// 5. (NOT IMPLEMENTED YET IN CONSUL) VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates.
	// {},
	// 6. Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed.
	{TypeUrl: xdscommon.ClusterType, Remove: true},
	{TypeUrl: xdscommon.EndpointType, Remove: true},
	// xDS updates can be pushed independently if no new
	// clusters/routes/listeners are added or if it’s acceptable to
	// temporarily drop traffic during updates. Note that in case of
	// LDS updates, the listeners will be warmed before they receive
	// traffic, i.e. the dependent routes are fetched through RDS if
	// configured. Clusters are warmed when adding/removing/updating
	// clusters. On the other hand, routes are not warmed, i.e., the
	// management plane must ensure that clusters referenced by a route
	// are in place, before pushing the updates for a route.
}
type xDSUpdateOperation struct {
	TypeUrl string
	Upsert  bool
	Remove  bool
}

func (op *xDSUpdateOperation) errorLogNameReplyPrefix() string {
	switch {
	case op.Upsert && op.Remove:
		return "upsert/remove "
	case op.Upsert:
		return "upsert "
	case op.Remove:
		return "remove "
	default:
		return ""
	}
}

type xDSDeltaChild struct {
	// childType is a type that in Envoy is actually stored within this type.
	// Upserts of THIS type should potentially trigger dependent named
	// resources within the child to be re-configured.
	childType *xDSDeltaType

	// childrenNames is a map of parent resource names to a list of associated
	// child resource names.
	childrenNames map[string][]string
}

type xDSDeltaType struct {
	generator    *ResourceGenerator
	stream       ADSDeltaStream
	typeURL      string
	allowEmptyFn func(kind structs.ServiceKind) bool

	// deltaChild contains data for an xDS child type if there is one.
	// For example, endpoints are a child type of clusters.
	deltaChild *xDSDeltaChild

	// registered indicates if this type has been requested at least once by
	// the proxy
	registered bool

	// wildcard indicates that this type was requested with no preference for
	// specific resource names. subscribe/unsubscribe are ignored.
	wildcard bool

	// sentToEnvoyOnce is true after we've sent one response to envoy.
	sentToEnvoyOnce bool

	// subscriptions is the set of currently subscribed envoy resources.
	// If wildcard == true, this will be empty.
	subscriptions map[string]struct{}

	// resourceVersions is the current view of CONFIRMED/ACKed updates to
	// envoy's view of the loaded resources.
	//
	// name => version
	resourceVersions map[string]string

	// pendingUpdates is a set of un-ACKed updates to the 'resourceVersions'
	// map. Once we get an ACK from envoy we'll update the resourceVersions map
	// and strike the entry from this map.
	//
	// nonce -> name -> {version}
	pendingUpdates map[string]map[string]PendingUpdate
}
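
// subscribed reports whether envoy has expressed interest in the named
// resource, either explicitly or implicitly via wildcard mode.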
func (t *xDSDeltaType) subscribed(name string) bool {
	if t.wildcard {
		return true
	}
	_, subscribed := t.subscriptions[name]
	return subscribed
}

type PendingUpdate struct {
	Remove  bool
	Version string
}
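
// newDeltaType creates the tracking state for a single xDS type (LDS, RDS,
// CDS, or EDS) on one stream. allowEmptyFn reports whether an empty set of
// resources is a valid thing to send for the given service kind.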
func newDeltaType(
	generator *ResourceGenerator,
	stream ADSDeltaStream,
	typeUrl string,
	allowEmptyFn func(kind structs.ServiceKind) bool,
) *xDSDeltaType {
	return &xDSDeltaType{
		generator:        generator,
		stream:           stream,
		typeURL:          typeUrl,
		allowEmptyFn:     allowEmptyFn,
		subscriptions:    make(map[string]struct{}),
		resourceVersions: make(map[string]string),
		pendingUpdates:   make(map[string]map[string]PendingUpdate),
	}
}

// Recv handles new discovery requests from envoy.
//
// Returns deltaRecvNewSubscription the first time a type receives a request.
func (t *xDSDeltaType) Recv(req *envoy_discovery_v3.DeltaDiscoveryRequest, sf supportedProxyFeatures) deltaRecvResponse {
	if t == nil {
		return deltaRecvUnknownType // not something we care about
	}
	logger := t.generator.Logger.With("typeUrl", t.typeURL)

	registeredThisTime := false
	if !t.registered {
		// We are in wildcard mode if the first request of a particular
		// type has an empty subscription list.
		t.wildcard = len(req.ResourceNamesSubscribe) == 0
		t.registered = true
		registeredThisTime = true
	}

	/*
		DeltaDiscoveryRequest can be sent in the following situations:

		Initial message in a xDS bidirectional gRPC stream.

		As an ACK or NACK response to a previous DeltaDiscoveryResponse. In
		this case the response_nonce is set to the nonce value in the Response.
		ACK or NACK is determined by the absence or presence of error_detail.

		Spontaneous DeltaDiscoveryRequests from the client. This can be done to
		dynamically add or remove elements from the tracked resource_names set.
		In this case response_nonce must be omitted.
	*/

	/*
		DeltaDiscoveryRequest plays two independent roles. Any
		DeltaDiscoveryRequest can be either or both of:
	*/

	if req.ResponseNonce != "" {
		/*
			[2] (N)ACKing an earlier resource update from the server (using
			response_nonce, with presence of error_detail making it a NACK).
		*/
		if req.ErrorDetail == nil {
			logger.Trace("got ok response from envoy proxy", "nonce", req.ResponseNonce)
			t.ack(req.ResponseNonce)
		} else {
			logger.Error("got error response from envoy proxy", "nonce", req.ResponseNonce,
				"error", status.ErrorProto(req.ErrorDetail))
			t.nack(req.ResponseNonce)
			return deltaRecvResponseNack
		}
	}

	if registeredThisTime && len(req.InitialResourceVersions) > 0 {
		/*
			Additionally, the first message (for a given type_url) of a
			reconnected gRPC stream has a third role:

			[3] informing the server of the resources (and their versions) that
			the client already possesses, using the initial_resource_versions
			field.
		*/
		logger.Trace("setting initial resource versions for stream",
			"resources", req.InitialResourceVersions)
		t.resourceVersions = req.InitialResourceVersions
		if !t.wildcard {
			for k := range req.InitialResourceVersions {
				t.subscriptions[k] = struct{}{}
			}
		}
	}

	if !t.wildcard {
		/*
			[1] informing the server of what resources the client has
			gained/lost interest in (using resource_names_subscribe and
			resource_names_unsubscribe), or
		*/
		for _, name := range req.ResourceNamesSubscribe {
			// A resource_names_subscribe field may contain resource names that
			// the server believes the client is already subscribed to, and
			// furthermore has the most recent versions of. However, the server
			// must still provide those resources in the response; due to
			// implementation details hidden from the server, the client may
			// have “forgotten” those resources despite apparently remaining
			// subscribed.
			//
			// NOTE: the server must respond with all resources listed in
			// resource_names_subscribe, even if it believes the client has the
			// most recent version of them. The reason: the client may have
			// dropped them, but then regained interest before it had a chance
			// to send the unsubscribe message.
			//
			// We handle that here by ALWAYS wiping the version so the diff
			// decides to send the value.
			_, alreadySubscribed := t.subscriptions[name]
			t.subscriptions[name] = struct{}{}

			// Reset the tracked version so we force a reply.
			if _, alreadyTracked := t.resourceVersions[name]; alreadyTracked {
				t.resourceVersions[name] = ""
			}

			// Certain xDS types are children of other types, so when Envoy
			// re-subscribes to a parent we MUST assume that any data it once
			// had for the children of that parent is gone and needs to be
			// re-sent.
			if t.deltaChild != nil && t.deltaChild.childType.registered {
				for _, childName := range t.deltaChild.childrenNames[name] {
					t.ensureChildResend(name, childName)
				}
			}

			if alreadySubscribed {
				logger.Trace("re-subscribing resource for stream", "resource", name)
			} else {
				logger.Trace("subscribing resource for stream", "resource", name)
			}
		}

		for _, name := range req.ResourceNamesUnsubscribe {
			if _, ok := t.subscriptions[name]; !ok {
				continue
			}
			delete(t.subscriptions, name)
			logger.Trace("unsubscribing resource for stream", "resource", name)
			// NOTE: we'll let the normal differential comparison handle cleaning up resourceVersions
		}
	}

	if registeredThisTime {
		return deltaRecvNewSubscription
	}
	return deltaRecvResponseAck
}
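
// ack commits the pending updates associated with the given nonce into
// resourceVersions, now that envoy has confirmed them.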
func (t *xDSDeltaType) ack(nonce string) {
	pending, ok := t.pendingUpdates[nonce]
	if !ok {
		return
	}

	for name, obj := range pending {
		if obj.Remove {
			delete(t.resourceVersions, name)
			continue
		}

		t.resourceVersions[name] = obj.Version
	}
	t.sentToEnvoyOnce = true
	delete(t.pendingUpdates, nonce)
}
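
// nack discards the pending updates associated with the given nonce;
// resourceVersions is left untouched so the rejected resources will be
// re-evaluated against the next snapshot.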
func (t *xDSDeltaType) nack(nonce string) {
	delete(t.pendingUpdates, nonce)
}
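
// SendIfNew diffs consul's current view of this type against what envoy has
// ACKed and, if anything changed, sends a DeltaDiscoveryResponse tagged with a
// fresh nonce. It returns (err, sent), where sent reports whether a response
// was written to the stream.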
func (t *xDSDeltaType) SendIfNew(
	kind structs.ServiceKind,
	currentVersions map[string]string, // name => version (as consul knows right now)
	resourceMap *xdscommon.IndexedResources,
	nonce *uint64,
	upsert, remove bool,
) (error, bool) {
	if t == nil || !t.registered {
		return nil, false
	}

	// Wait for Envoy to catch up with this delta type before sending something new.
	if len(t.pendingUpdates) > 0 {
		return nil, false
	}

	logger := t.generator.Logger.With("typeUrl", t.typeURL)

	allowEmpty := t.allowEmptyFn != nil && t.allowEmptyFn(kind)

	// Zero length resource responses should be ignored and are the result of no
	// data yet. Notice that this caused a bug originally where we had zero
	// healthy endpoints for an upstream that would cause Envoy to hang waiting
	// for the EDS response. This is fixed though by ensuring we send an explicit
	// empty LoadAssignment resource for the cluster rather than allowing junky
	// empty resources.
	if len(currentVersions) == 0 && !allowEmpty {
		// Nothing to send yet
		return nil, false
	}

	resp, updates, err := t.createDeltaResponse(currentVersions, resourceMap, upsert, remove)
	if err != nil {
		return err, false
	}

	if resp == nil {
		return nil, false
	}

	*nonce++
	resp.Nonce = fmt.Sprintf("%08x", *nonce)

	t.generator.logTraceResponse("Incremental xDS v3", resp)

	logger.Trace("sending response", "nonce", resp.Nonce)
	if err := t.stream.Send(resp); err != nil {
		return err, false
	}
	logger.Trace("sent response", "nonce", resp.Nonce)

	// Certain xDS types are children of other types, meaning that if an update is pushed for a parent,
	// we MUST send new data for all its children. Envoy will NOT re-subscribe to the child data upon
	// receiving updates for the parent, so we need to handle this ourselves.
	//
	// Note that we do not check whether the deltaChild.childType is registered here, since we send
	// parent types before child types, meaning that it's expected on first send of a parent that
	// there are no subscriptions for the child type.
	if t.deltaChild != nil {
		for name := range updates {
			if children, ok := resourceMap.ChildIndex[t.typeURL][name]; ok {
				// Capture the relevant child resource names on this pending update so
				// we can know the linked children if Envoy ever re-subscribes to the parent resource.
				t.deltaChild.childrenNames[name] = children

				for _, childName := range children {
					t.ensureChildResend(name, childName)
				}
			}
		}
	}
	t.pendingUpdates[resp.Nonce] = updates

	return nil, true
}
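
// createDeltaResponse computes the set of upserts and removals needed to bring
// envoy's ACKed state in line with currentVersions and packages them into a
// DeltaDiscoveryResponse, along with the PendingUpdate entries that will be
// committed once envoy ACKs the response.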
func (t *xDSDeltaType) createDeltaResponse(
	currentVersions map[string]string, // name => version (as consul knows right now)
	resourceMap *xdscommon.IndexedResources,
	upsert, remove bool,
) (*envoy_discovery_v3.DeltaDiscoveryResponse, map[string]PendingUpdate, error) {
	// compute difference
	var (
		hasRelevantUpdates = false
		updates            = make(map[string]PendingUpdate)
	)

	if t.wildcard {
		// First find things that need updating or deleting
		for name, envoyVers := range t.resourceVersions {
			currVers, ok := currentVersions[name]
			if !ok {
				if remove {
					hasRelevantUpdates = true
				}
				updates[name] = PendingUpdate{Remove: true}
			} else if currVers != envoyVers {
				if upsert {
					hasRelevantUpdates = true
				}
				updates[name] = PendingUpdate{Version: currVers}
			}
		}

		// Now find new things
		for name, currVers := range currentVersions {
			if _, known := t.resourceVersions[name]; known {
				continue
			}
			if upsert {
				hasRelevantUpdates = true
			}
			updates[name] = PendingUpdate{Version: currVers}
		}
	} else {
		// First find things that need updating or deleting

		// Walk the list of things currently stored in envoy
		for name, envoyVers := range t.resourceVersions {
			if t.subscribed(name) {
				if currVers, ok := currentVersions[name]; ok {
					if currVers != envoyVers {
						if upsert {
							hasRelevantUpdates = true
						}
						updates[name] = PendingUpdate{Version: currVers}
					}
				}
			}
		}

		// Now find new things not in envoy yet
		for name := range t.subscriptions {
			if _, known := t.resourceVersions[name]; known {
				continue
			}
			if currVers, ok := currentVersions[name]; ok {
				updates[name] = PendingUpdate{Version: currVers}
				if upsert {
					hasRelevantUpdates = true
				}
			}
		}
	}

	if !hasRelevantUpdates && t.sentToEnvoyOnce {
		return nil, nil, nil
	}

	// now turn this into a disco response
	resp := &envoy_discovery_v3.DeltaDiscoveryResponse{
		// TODO(rb): consider putting something in SystemVersionInfo?
		TypeUrl: t.typeURL,
	}
	realUpdates := make(map[string]PendingUpdate)
	for name, obj := range updates {
		if obj.Remove {
			if remove {
				resp.RemovedResources = append(resp.RemovedResources, name)
				realUpdates[name] = PendingUpdate{Remove: true}
			}
		} else if upsert {
			resources, ok := resourceMap.Index[t.typeURL]
			if !ok {
				return nil, nil, fmt.Errorf("unknown type url: %s", t.typeURL)
			}
			res, ok := resources[name]
			if !ok {
				return nil, nil, fmt.Errorf("unknown name for type url %q: %s", t.typeURL, name)
			}
			any, err := anypb.New(res)
			if err != nil {
				return nil, nil, err
			}

			resp.Resources = append(resp.Resources, &envoy_discovery_v3.Resource{
				Name:     name,
				Resource: any,
				Version:  obj.Version,
			})
			realUpdates[name] = obj
		}
	}

	return resp, realUpdates, nil
}
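
// ensureChildResend clears the ACKed version of a child resource (e.g. an EDS
// entry under a CDS cluster) so the next diff re-sends it alongside its
// updated parent.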
func (t *xDSDeltaType) ensureChildResend(parentName, childName string) {
	if _, exist := t.deltaChild.childType.resourceVersions[childName]; !exist {
		return
	}
	if !t.subscribed(childName) {
		return
	}

	t.generator.Logger.Trace(
		"triggering implicit update of resource",
		"typeUrl", t.typeURL,
		"resource", parentName,
		"childTypeUrl", t.deltaChild.childType.typeURL,
		"childResource", childName,
	)

	// resourceVersions tracks the last known version for this childName that Envoy
	// has ACKed. By setting this to empty it effectively tells us that Envoy does
	// not have any data for that child, and we need to re-send.
	t.deltaChild.childType.resourceVersions[childName] = ""
}
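
// computeResourceVersions hashes every indexed resource so changes can be
// detected by comparing versions rather than full protobuf payloads.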
func computeResourceVersions(resourceMap *xdscommon.IndexedResources) (map[string]map[string]string, error) {
	out := make(map[string]map[string]string)
	for typeUrl, resources := range resourceMap.Index {
		m, err := hashResourceMap(resources)
		if err != nil {
			return nil, fmt.Errorf("failed to hash resources for %q: %v", typeUrl, err)
		}
		out[typeUrl] = m
	}
	return out, nil
}
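
// populateChildIndexMap records, for each parent resource, which child
// resources depend on it: listeners map to the RDS routes they reference, and
// clusters map to the EDS load assignment that shares their name.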
func populateChildIndexMap(resourceMap *xdscommon.IndexedResources) error {
	// LDS and RDS have a more complicated relationship.
	for name, res := range resourceMap.Index[xdscommon.ListenerType] {
		listener := res.(*envoy_listener_v3.Listener)
		rdsRouteNames, err := extractRdsResourceNames(listener)
		if err != nil {
			return err
		}
		resourceMap.ChildIndex[xdscommon.ListenerType][name] = rdsRouteNames
	}

	// CDS and EDS share exact names.
	for name := range resourceMap.Index[xdscommon.ClusterType] {
		resourceMap.ChildIndex[xdscommon.ClusterType][name] = []string{name}
	}

	return nil
}
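
// indexResources converts the flat per-type resource lists produced by the
// generator into an IndexedResources keyed by type URL and resource name.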
func indexResources(logger hclog.Logger, resources map[string][]proto.Message) *xdscommon.IndexedResources {
	data := xdscommon.EmptyIndexedResources()

	for typeURL, typeRes := range resources {
		for _, res := range typeRes {
			name := getResourceName(res)
			if name == "" {
				logger.Warn("skipping unexpected xDS type found in delta snapshot", "typeURL", typeURL)
			} else {
				data.Index[typeURL][name] = res
			}
		}
	}

	return data
}
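
// getResourceName returns the name envoy uses to identify the given resource,
// or "" for types this server does not track.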
func getResourceName(res proto.Message) string {
	// NOTE: this only covers types that we currently care about for LDS/RDS/CDS/EDS
	switch x := res.(type) {
	case *envoy_listener_v3.Listener: // LDS
		return x.Name
	case *envoy_route_v3.RouteConfiguration: // RDS
		return x.Name
	case *envoy_cluster_v3.Cluster: // CDS
		return x.Name
	case *envoy_endpoint_v3.ClusterLoadAssignment: // EDS
		return x.ClusterName
	default:
		return ""
	}
}
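
// hashResourceMap hashes each resource in the map, returning name => hash.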
func hashResourceMap(resources map[string]proto.Message) (map[string]string, error) {
	m := make(map[string]string)
	for name, res := range resources {
		h, err := hashResource(res)
		if err != nil {
			return nil, fmt.Errorf("failed to hash resource %q: %v", name, err)
		}
		m[name] = h
	}
	return m, nil
}

// hashResource will take a resource and create a SHA256 hash sum out of the marshaled bytes
func hashResource(res proto.Message) (string, error) {
	h := sha256.New()
	marshaller := proto.MarshalOptions{Deterministic: true}

	data, err := marshaller.Marshal(res)
	if err != nil {
		return "", err
	}
	h.Write(data)

	return hex.EncodeToString(h.Sum(nil)), nil
}