package peerstream

import (
	"context"
	"crypto/subtle"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/armon/go-metrics"
	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/hashicorp/go-hclog"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"

	"github.com/hashicorp/consul/agent/connect"
	external "github.com/hashicorp/consul/agent/grpc-external"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/proto/pbpeering"
	"github.com/hashicorp/consul/proto/pbpeerstream"
)

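// BidirectionalStream captures the subset of a gRPC peering stream used by this
// package: sending and receiving replication messages, plus access to the
// stream's context.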
type BidirectionalStream interface {
	Send(*pbpeerstream.ReplicationMessage) error
	Recv() (*pbpeerstream.ReplicationMessage, error)
	Context() context.Context
}

// ExchangeSecret exchanges the one-time secret embedded in a peering token for a
// long-lived secret for use with the peering stream handler. This secret exchange
// prevents peering tokens from being reused.
//
// Note that if the peering secret exchange fails, a peering token may need to be
// re-generated, since the one-time initiation secret may have been invalidated.
func (s *Server) ExchangeSecret(ctx context.Context, req *pbpeerstream.ExchangeSecretRequest) (*pbpeerstream.ExchangeSecretResponse, error) {
	// For private/internal gRPC handlers, protoc-gen-rpc-glue generates the
	// requisite methods to satisfy the structs.RPCInfo interface using fields
	// from the pbcommon package. This service is public, so we can't use those
	// fields in our proto definition. Instead, we construct our RPCInfo manually.
	//
	// Embedding WriteRequest ensures RPCs are forwarded to the leader, embedding
	// DCSpecificRequest adds the RequestDatacenter method (but as we're not
	// setting Datacenter it has the effect of *not* doing DC forwarding).
	var rpcInfo struct {
		structs.WriteRequest
		structs.DCSpecificRequest
	}

	var resp *pbpeerstream.ExchangeSecretResponse
	handled, err := s.ForwardRPC(&rpcInfo, func(conn *grpc.ClientConn) error {
		var err error
		resp, err = pbpeerstream.NewPeerStreamServiceClient(conn).ExchangeSecret(ctx, req)
		return err
	})
	if handled || err != nil {
		return resp, err
	}

	defer metrics.MeasureSince([]string{"peering", "exchange_secret"}, time.Now())

	// Validate the given establishment secret against the one stored on the server.
	existing, err := s.GetStore().PeeringSecretsRead(nil, req.PeerID)
	if err != nil {
		return nil, grpcstatus.Errorf(codes.Internal, "failed to read peering secret: %v", err)
	}
	if existing == nil || subtle.ConstantTimeCompare([]byte(existing.GetEstablishment().GetSecretID()), []byte(req.EstablishmentSecret)) == 0 {
		return nil, grpcstatus.Error(codes.PermissionDenied, "invalid peering establishment secret")
	}

	id, err := s.generateNewStreamSecret()
	if err != nil {
		return nil, grpcstatus.Errorf(codes.Internal, "failed to generate peering stream secret: %v", err)
	}

	writeReq := &pbpeering.SecretsWriteRequest{
		PeerID: req.PeerID,
		Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
			ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
				// Pass the given establishment secret so that it can be re-validated at the state store.
				// Validating the establishment secret at the RPC is not enough because there can be
				// concurrent callers with the same establishment secret.
				EstablishmentSecret: req.EstablishmentSecret,

				// Overwrite any existing un-utilized pending stream secret.
				PendingStreamSecret: id,
			},
		},
	}
	err = s.Backend.PeeringSecretsWrite(writeReq)
	if err != nil {
		return nil, grpcstatus.Errorf(codes.Internal, "failed to persist peering secret: %v", err)
	}

	return &pbpeerstream.ExchangeSecretResponse{StreamSecret: id}, nil
}

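// generateNewStreamSecret generates a UUID to serve as the long-lived stream
// secret, validating the proposed value against the backend before returning it.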
func (s *Server) generateNewStreamSecret() (string, error) {
	id, err := lib.GenerateUUID(s.Backend.ValidateProposedPeeringSecret)
	if err != nil {
		return "", err
	}
	return id, nil
}

// StreamResources handles incoming streaming connections.
func (s *Server) StreamResources(stream pbpeerstream.PeerStreamService_StreamResourcesServer) error {
	logger := s.Logger.Named("stream-resources").With("request_id", external.TraceID())

	logger.Trace("Started processing request")
	defer logger.Trace("Finished processing request")

	// NOTE: this code should have similar error handling to the new-request
	// handling code in HandleStream()

	if !s.Backend.IsLeader() {
		// We are not the leader so we will hang up on the dialer.
		logger.Debug("cannot establish a peering stream on a follower node")

		st, err := grpcstatus.New(codes.FailedPrecondition,
			"cannot establish a peering stream on a follower node").WithDetails(
			&pbpeerstream.LeaderAddress{Address: s.Backend.GetLeaderAddress()})
		if err != nil {
			logger.Error(fmt.Sprintf("failed to marshal the leader address in response; err: %v", err))
			return grpcstatus.Error(codes.FailedPrecondition, "cannot establish a peering stream on a follower node")
		} else {
			return st.Err()
		}
	}

	// Initial message on a new stream must be a new subscription request.
	first, err := stream.Recv()
	if err != nil {
		logger.Error("failed to establish stream", "error", err)
		return err
	}

	// TODO(peering) Make request contain a list of resources, so that roots and services can be
	// subscribed to with a single request. See:
	// https://github.com/envoyproxy/data-plane-api/blob/main/envoy/service/discovery/v3/discovery.proto#L46
	req := first.GetOpen()
	if req == nil {
		return grpcstatus.Error(codes.InvalidArgument, "first message when initiating a peering must be: Open")
	}
	logger.Trace("received initial replication request from peer")
	logTraceRecv(logger, req)

	if req.PeerID == "" {
		return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must specify a PeerID")
	}

	var p *pbpeering.Peering
	_, p, err = s.GetStore().PeeringReadByID(nil, req.PeerID)
	if err != nil {
		logger.Error("failed to look up peer", "peer_id", req.PeerID, "error", err)
		return grpcstatus.Error(codes.Internal, "failed to find PeerID: "+req.PeerID)
	}
	if p == nil {
		return grpcstatus.Error(codes.InvalidArgument, "initial subscription for unknown PeerID: "+req.PeerID)
	}
	// Clone the peering because we will modify and rewrite it.
	p, ok := proto.Clone(p).(*pbpeering.Peering)
	if !ok {
		return grpcstatus.Errorf(codes.Internal, "unexpected error while cloning a Peering object.")
	}

	if !p.IsActive() {
		// If peering is terminated, then our peer sent the termination message.
		// For other non-active states, send the termination message.
		if p.State != pbpeering.PeeringState_TERMINATED {
			term := &pbpeerstream.ReplicationMessage{
				Payload: &pbpeerstream.ReplicationMessage_Terminated_{
					Terminated: &pbpeerstream.ReplicationMessage_Terminated{},
				},
			}
			logTraceSend(logger, term)

			// we don't care if send fails; stream will be killed by termination message or grpc error
			_ = stream.Send(term)
		}
		return grpcstatus.Error(codes.Aborted, "peering is marked as deleted: "+req.PeerID)
	}

	secrets, err := s.GetStore().PeeringSecretsRead(nil, req.PeerID)
	if err != nil {
		logger.Error("failed to look up secrets for peering", "peer_id", req.PeerID, "error", err)
		return grpcstatus.Error(codes.Internal, "failed to find peering secrets for PeerID: "+req.PeerID)
	}
	if secrets == nil {
		logger.Error("no known secrets for peering", "peer_id", req.PeerID, "error", err)
		return grpcstatus.Error(codes.Internal, "unable to authorize connection, peering must be re-established")
	}

	// Check the given secret ID against the active stream secret.
	var authorized bool
	if active := secrets.GetStream().GetActiveSecretID(); active != "" {
		if subtle.ConstantTimeCompare([]byte(active), []byte(req.StreamSecretID)) == 1 {
			authorized = true
		}
	}

	// Next check the given stream secret against the locally stored pending stream secret.
	// A pending stream secret is one that has not been seen by this handler.
	if pending := secrets.GetStream().GetPendingSecretID(); pending != "" && !authorized {
		// If the given secret is the currently pending secret, it gets promoted to be the active secret.
		// This is the case where a server recently exchanged for a stream secret.
		if subtle.ConstantTimeCompare([]byte(pending), []byte(req.StreamSecretID)) == 0 {
			return grpcstatus.Error(codes.PermissionDenied, "invalid peering stream secret")
		}
		authorized = true

		promoted := &pbpeering.SecretsWriteRequest{
			PeerID: p.ID,
			Request: &pbpeering.SecretsWriteRequest_PromotePending{
				PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
					// Promote the pending stream secret to be the active stream secret.
					ActiveStreamSecret: pending,
				},
			},
		}

		p.Remote = req.Remote
		err = s.Backend.PeeringWrite(&pbpeering.PeeringWriteRequest{
			Peering:        p,
			SecretsRequest: promoted,
		})
		if err != nil {
			return grpcstatus.Errorf(codes.Internal, "failed to persist peering: %v", err)
		}
	}
	if !authorized {
		return grpcstatus.Error(codes.PermissionDenied, "invalid peering stream secret")
	}

	logger.Info("accepted initial replication request from peer", "peer_id", p.ID)

	if p.PeerID != "" {
		return grpcstatus.Error(codes.InvalidArgument, "expected PeerID to be empty; the wrong end of peering is being dialed")
	}

	streamReq := HandleStreamRequest{
		LocalID:   p.ID,
		RemoteID:  "",
		PeerName:  p.Name,
		Partition: p.Partition,
		Stream:    stream,
	}
	err = s.HandleStream(streamReq)
	// A nil error indicates that the peering was deleted and the stream needs to be gracefully shut down.
	if err == nil {
		s.DrainStream(streamReq)
		return nil
	}

	logger.Error("error handling stream", "peer_name", p.Name, "peer_id", req.PeerID, "error", err)
	return err
}

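// HandleStreamRequest carries the identifying information and the open stream
// needed by HandleStream to service replication for a single peering.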
type HandleStreamRequest struct {
	// LocalID is the UUID for the peering in the local Consul datacenter.
	LocalID string

	// RemoteID is the UUID for the peering from the perspective of the peer.
	RemoteID string

	// PeerName is the name of the peering.
	PeerName string

	// Partition is the local partition associated with the peer.
	Partition string

	// Stream is the open stream to the peer cluster.
	Stream BidirectionalStream
}

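// WasDialed returns true when no RemoteID is set for the stream, which is the
// case for streams accepted in StreamResources (i.e. our peer dialed us).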
func (r HandleStreamRequest) WasDialed() bool {
	return r.RemoteID == ""
}

// DrainStream attempts to gracefully drain the stream when the connection is going to be torn down.
// Tearing down the connection too quickly can lead to our peer receiving a context cancellation error before the stream termination message.
// Handling the termination message is important to set the expectation that the peering will not be reestablished unless recreated.
func (s *Server) DrainStream(req HandleStreamRequest) {
	for {
		// Ensure that we read until an error, or the peer has nothing more to send.
		if _, err := req.Stream.Recv(); err != nil {
			if err != io.EOF {
				s.Logger.Warn("failed to tear down stream gracefully: peer may not have received termination message",
					"peer_name", req.PeerName, "peer_id", req.LocalID, "error", err)
			}
			break
		}
		// Since the peering is being torn down we discard all replication messages without an error.
		// We want to avoid importing new data at this point.
	}
}

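// HandleStream runs the stream handler for an established stream and records in
// the Tracker whether the stream disconnected gracefully or due to an error.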
func (s *Server) HandleStream(streamReq HandleStreamRequest) error {
	if err := s.realHandleStream(streamReq); err != nil {
		s.Tracker.DisconnectedDueToError(streamReq.LocalID, err.Error())
		return err
	}
	// TODO(peering) Also need to clear subscriptions associated with the peer
	s.Tracker.DisconnectedGracefully(streamReq.LocalID)
	return nil
}

// The localID provided is the locally-generated identifier for the peering.
// The remoteID is an identifier that the remote peer recognizes for the peering.
func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
	// TODO: pass logger down from caller?
	logger := s.Logger.Named("stream").
		With("peer_name", streamReq.PeerName).
		With("peer_id", streamReq.LocalID).
		With("dialed", streamReq.WasDialed())
	logger.Trace("handling stream for peer")

	// handleStreamCtx is local to this function.
	handleStreamCtx, cancel := context.WithCancel(streamReq.Stream.Context())
	defer cancel()

	status, err := s.Tracker.Connected(streamReq.LocalID)
	if err != nil {
		return fmt.Errorf("failed to register stream: %v", err)
	}

	var trustDomain string
	if s.ConnectEnabled {
		// Read the TrustDomain up front - we do not allow users to change the ClusterID
		// so reading it once at the beginning of the stream is sufficient.
		trustDomain, err = getTrustDomain(s.GetStore(), logger)
		if err != nil {
			return err
		}
	}

	remoteSubTracker := newResourceSubscriptionTracker()
	mgr := newSubscriptionManager(
		streamReq.Stream.Context(),
		logger,
		s.Config,
		trustDomain,
		s.Backend,
		s.GetStore,
		remoteSubTracker,
	)
	subCh := mgr.subscribe(streamReq.Stream.Context(), streamReq.LocalID, streamReq.PeerName, streamReq.Partition)

	// We need a mutex to protect against simultaneous sends to the client.
	var sendMutex sync.Mutex

	// streamSend is a helper function that sends msg over the stream
	// respecting the send mutex. It also logs the send and calls status.TrackSendError
	// on error.
	streamSend := func(msg *pbpeerstream.ReplicationMessage) error {
		logTraceSend(logger, msg)

		sendMutex.Lock()
		err := streamReq.Stream.Send(msg)
		sendMutex.Unlock()

		// We only track send successes and errors for response types because this is meant to track
		// resources, not request/ack messages.
		if msg.GetResponse() != nil {
			if err != nil {
				if id := msg.GetResponse().GetResourceID(); id != "" {
					logger.Error("failed to send resource", "resourceID", id, "error", err)
					status.TrackSendError(err.Error())
					return nil
				}
				status.TrackSendError(err.Error())
			} else {
				status.TrackSendSuccess()
			}
		}
		return err
	}

	// Subscribe to all relevant resource types.
	for _, resourceURL := range []string{
		pbpeerstream.TypeURLExportedService,
		pbpeerstream.TypeURLExportedServiceList,
		pbpeerstream.TypeURLPeeringTrustBundle,
		pbpeerstream.TypeURLPeeringServerAddresses,
	} {
		sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
			ResourceURL: resourceURL,
			PeerID:      streamReq.RemoteID,
		})
		if err := streamSend(sub); err != nil {
			// TODO(peering) Test error handling in calls to Send/Recv
			return fmt.Errorf("failed to send subscription for %q to stream: %w", resourceURL, err)
		}
	}

	// recvCh carries messages received from the gRPC stream.
	recvCh := make(chan *pbpeerstream.ReplicationMessage)
	// recvErrCh carries errors received from the gRPC stream.
	recvErrCh := make(chan error)

	// Start a goroutine to read from the stream and pass to recvCh and recvErrCh.
	// Using a separate goroutine allows us to process sends and receives all in the main for{} loop.
	go func() {
		for {
			msg, err := streamReq.Stream.Recv()
			if err != nil {
				recvErrCh <- err
				return
			}
			logTraceRecv(logger, msg)
			select {
			case recvCh <- msg:
			case <-handleStreamCtx.Done():
				return
			}
		}
	}()

	// Start a goroutine to send heartbeats at a regular interval.
	go func() {
		tick := time.NewTicker(s.outgoingHeartbeatInterval)
		defer tick.Stop()

		for {
			select {
			case <-handleStreamCtx.Done():
				return

			case <-tick.C:
				heartbeat := &pbpeerstream.ReplicationMessage{
					Payload: &pbpeerstream.ReplicationMessage_Heartbeat_{
						Heartbeat: &pbpeerstream.ReplicationMessage_Heartbeat{},
					},
				}
				if err := streamSend(heartbeat); err != nil {
					logger.Warn("error sending heartbeat", "err", err)
				}
			}
		}
	}()

	// incomingHeartbeatCtx will complete if incoming heartbeats time out.
	incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
		context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
	// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
	// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
	// value, not the current value.
	defer func() {
		incomingHeartbeatCtxCancel()
	}()

	// The nonce is used to correlate response/(ack|nack) pairs.
	var nonce uint64

	// The main loop that processes sends and receives.
	for {
		select {
		// When the doneCh is closed that means that the peering was deleted locally.
		case <-status.Done():
			logger.Info("ending stream")

			term := &pbpeerstream.ReplicationMessage{
				Payload: &pbpeerstream.ReplicationMessage_Terminated_{
					Terminated: &pbpeerstream.ReplicationMessage_Terminated{},
				},
			}
			if err := streamSend(term); err != nil {
				// Nolint directive needed due to bug in govet that doesn't see that the cancel
				// func of incomingHeartbeatCtx _does_ get called.
				//nolint:govet
				return fmt.Errorf("failed to send to stream: %v", err)
			}

			logger.Trace("deleting stream status")
			s.Tracker.DeleteStatus(streamReq.LocalID)

			return nil

		// Handle errors received from the stream by shutting down our handler.
		case err := <-recvErrCh:
			if err == io.EOF {
				// NOTE: We don't expect to receive an io.EOF error here when the stream is disconnected gracefully.
				// When the peering is deleted locally, status.Done() returns which is handled elsewhere and this method
				// exits. When we receive a Terminated message, that's also handled elsewhere and this method
				// exits. After the method exits this code here won't receive any recv errors and those will be handled
				// by DrainStream().
				err = fmt.Errorf("stream ended unexpectedly")
			} else {
				err = fmt.Errorf("unexpected error receiving from the stream: %w", err)
			}
			status.TrackRecvError(err.Error())
			return err

		// We haven't received a heartbeat within the expected interval. Kill the stream.
		case <-incomingHeartbeatCtx.Done():
			return fmt.Errorf("heartbeat timeout")

		case msg := <-recvCh:
			// NOTE: this code should have similar error handling to the
			// initial handling code in StreamResources()

			if !s.Backend.IsLeader() {
				// We are not the leader anymore, so we will hang up on the dialer.
				logger.Info("node is not a leader anymore; cannot continue streaming")

				st, err := grpcstatus.New(codes.FailedPrecondition,
					"node is not a leader anymore; cannot continue streaming").WithDetails(
					&pbpeerstream.LeaderAddress{Address: s.Backend.GetLeaderAddress()})
				if err != nil {
					logger.Error(fmt.Sprintf("failed to marshal the leader address in response; err: %v", err))
					return grpcstatus.Error(codes.FailedPrecondition, "node is not a leader anymore; cannot continue streaming")
				} else {
					return st.Err()
				}
			}

			if req := msg.GetRequest(); req != nil {
				if !pbpeerstream.KnownTypeURL(req.ResourceURL) {
					return grpcstatus.Errorf(codes.InvalidArgument, "subscription request to unknown resource URL: %s", req.ResourceURL)
				}

				// There are different formats of requests depending upon where in the stream lifecycle we are.
				//
				// 1. Initial Request: This is the first request being received
				//    FROM the establishing peer. This is handled specially in
				//    (*Server).StreamResources BEFORE calling
				//    (*Server).HandleStream. This takes care of determining what
				//    the PeerID is for the stream.
				//
				// 2. Subscription Request: This is the first request for a
				//    given ResourceURL within a stream. The Initial Request (1)
				//    is always one of these as well.
				//
				//    These must contain a valid ResourceURL with no Error or
				//    ResponseNonce set.
				//
				//    It is valid to subscribe to the same ResourceURL twice
				//    within the lifetime of a stream, but all duplicate
				//    subscriptions are treated as no-ops upon receipt.
				//
				// 3. ACK Request: This is the message sent in reaction to an
				//    earlier Response to indicate that the response was processed
				//    by the other side successfully.
				//
				//    These must contain a ResponseNonce and no Error.
				//
				// 4. NACK Request: This is the message sent in reaction to an
				//    earlier Response to indicate that the response was NOT
				//    processed by the other side successfully.
				//
				//    These must contain a ResponseNonce and an Error.
				//
				if !remoteSubTracker.IsSubscribed(req.ResourceURL) {
					// This must be a new subscription request to add a new
					// resource type, vet it like a new request.

					if !streamReq.WasDialed() {
						if req.PeerID != "" && req.PeerID != streamReq.RemoteID {
							// Not necessary after the first request from the dialer,
							// but if provided must match.
							return grpcstatus.Errorf(codes.InvalidArgument,
								"initial subscription requests for a resource type must have consistent PeerID values: got=%q expected=%q",
								req.PeerID,
								streamReq.RemoteID,
							)
						}
					}
					if req.ResponseNonce != "" {
						return grpcstatus.Error(codes.InvalidArgument, "initial subscription requests for a resource type must not contain a nonce")
					}
					if req.Error != nil {
						return grpcstatus.Error(codes.InvalidArgument, "initial subscription request for a resource type must not contain an error")
					}

					if remoteSubTracker.Subscribe(req.ResourceURL) {
						logger.Info("subscribing to resource type", "resourceURL", req.ResourceURL)
					}
					status.TrackAck()
					continue
				}

				// At this point we have a valid ResourceURL and we are subscribed to it.

				switch {
				case req.Error == nil: // ACK
					// TODO(peering): handle ACK fully
					status.TrackAck()

				case req.Error != nil: // NACK
					// TODO(peering): handle NACK fully
					logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message)
					status.TrackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message))

				default:
					// This branch might be dead code, but it could also happen
					// during a stray 're-subscribe' so just ignore the
					// message.
				}

				continue
			}

			if resp := msg.GetResponse(); resp != nil {
				reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp)
				if err != nil {
					logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID)
					status.TrackRecvError(err.Error())
				} else {
					status.TrackRecvResourceSuccess()
				}

				// We are replying ACK or NACK depending on whether we successfully processed the response.
				if err := streamSend(reply); err != nil {
					return fmt.Errorf("failed to send to stream: %v", err)
				}

				continue
			}

			if term := msg.GetTerminated(); term != nil {
				logger.Info("peering was deleted by our peer: marking peering as terminated and cleaning up imported resources")

				// Once marked as terminated, a separate deferred deletion routine will clean up imported resources.
				if err := s.Backend.PeeringTerminateByID(&pbpeering.PeeringTerminateByIDRequest{ID: streamReq.LocalID}); err != nil {
					logger.Error("failed to mark peering as terminated", "error", err)
				}
				return nil
			}

			if msg.GetHeartbeat() != nil {
				status.TrackRecvHeartbeat()

				// Reset the heartbeat timeout by creating a new context.
				// We first must cancel the old context so there's no leaks. This is safe to do because we're only
				// reading that context within this for{} loop, and so we won't accidentally trigger the heartbeat
				// timeout.
				incomingHeartbeatCtxCancel()
				// NOTE: IDEs and govet think that the reassigned cancel below never gets
				// called, but it does by the defer when the heartbeat ctx is first created.
				// They just can't trace the execution properly for some reason (possibly golang/go#29587).
				//nolint:govet
				incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
					context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
			}

		case update := <-subCh:
			var resp *pbpeerstream.ReplicationMessage_Response
			switch {
			case strings.HasPrefix(update.CorrelationID, subExportedServiceList):
				resp, err = makeExportedServiceListResponse(status, update)
				if err != nil {
					// Log the error and skip this response to avoid locking up peering due to a bad update event.
					logger.Error("failed to create exported service list response", "error", err)
					continue
				}
			case strings.HasPrefix(update.CorrelationID, subExportedService):
				resp, err = makeServiceResponse(status, update)
				if err != nil {
					// Log the error and skip this response to avoid locking up peering due to a bad update event.
					logger.Error("failed to create service response", "error", err)
					continue
				}

			case update.CorrelationID == subCARoot:
				resp, err = makeCARootsResponse(update)
				if err != nil {
					// Log the error and skip this response to avoid locking up peering due to a bad update event.
					logger.Error("failed to create ca roots response", "error", err)
					continue
				}

			case update.CorrelationID == subServerAddrs:
				resp, err = makeServerAddrsResponse(update)
				if err != nil {
					logger.Error("failed to create server address response", "error", err)
					continue
				}

			default:
				logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID)
				continue
			}
			if resp == nil {
				continue
			}

			// Assign a new unique nonce to the response.
			nonce++
			resp.Nonce = fmt.Sprintf("%08x", nonce)

			replResp := makeReplicationResponse(resp)
			if err := streamSend(replResp); err != nil {
				// note: govet warns of context leak but it is cleaned up in a defer
				return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
			}
		}
	}
}

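// getTrustDomain returns the trust domain of the local cluster, derived from
// the cluster ID in the Connect CA configuration.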
func getTrustDomain(store StateStore, logger hclog.Logger) (string, error) {
	_, cfg, err := store.CAConfig(nil)
	switch {
	case err != nil:
		logger.Error("failed to read Connect CA Config", "error", err)
		return "", grpcstatus.Error(codes.Internal, "failed to read Connect CA Config")
	case cfg == nil:
		logger.Warn("cannot begin stream because Connect CA is not yet initialized")
		return "", grpcstatus.Error(codes.Unavailable, "Connect CA is not yet initialized")
	}
	return connect.SpiffeIDSigningForCluster(cfg.ClusterID).Host(), nil
}

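// StreamStatus returns the status of the stream to the given peer and whether
// such a stream is currently being tracked.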
func (s *Server) StreamStatus(peerID string) (resp Status, found bool) {
	return s.Tracker.StreamStatus(peerID)
}

// ConnectedStreams returns a map of connected stream IDs to the corresponding channel for tearing them down.
func (s *Server) ConnectedStreams() map[string]chan struct{} {
	return s.Tracker.ConnectedStreams()
}

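// logTraceRecv logs a replication message received from the peer at trace level.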
func logTraceRecv(logger hclog.Logger, pb proto.Message) {
	logTraceProto(logger, pb, true)
}

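// logTraceSend logs a replication message sent to the peer at trace level.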
func logTraceSend(logger hclog.Logger, pb proto.Message) {
	logTraceProto(logger, pb, false)
}

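// logTraceProto marshals the given message to JSON and logs it at trace level,
// redacting any stream secret carried in an Open message.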
func logTraceProto(logger hclog.Logger, pb proto.Message, received bool) {
	if !logger.IsTrace() {
		return
	}

	dir := "sent"
	if received {
		dir = "received"
	}

	// Redact the long-lived stream secret to avoid leaking it in trace logs.
	pbToLog := pb
	switch msg := pb.(type) {
	case *pbpeerstream.ReplicationMessage:
		clone := &pbpeerstream.ReplicationMessage{}
		proto.Merge(clone, msg)

		if clone.GetOpen() != nil {
			clone.GetOpen().StreamSecretID = "hidden"
			pbToLog = clone
		}
	case *pbpeerstream.ReplicationMessage_Open:
		clone := &pbpeerstream.ReplicationMessage_Open{}
		proto.Merge(clone, msg)

		clone.StreamSecretID = "hidden"
		pbToLog = clone
	}

	m := jsonpb.Marshaler{
		Indent: "  ",
	}
	out, err := m.MarshalToString(pbToLog)
	if err != nil {
		out = "<ERROR: " + err.Error() + ">"
	}

	logger.Trace("replication message", "direction", dir, "protobuf", out)
}

// resourceSubscriptionTracker is used to keep track of the ResourceURLs that a
// stream has subscribed to and can notify you when a subscription comes in by
// closing the channels returned by SubscribedChan.
type resourceSubscriptionTracker struct {
	// notifierMap keeps track of a notification channel for each resourceURL.
	// Keys may exist in here even when they do not exist in 'subscribed' as
	// calling SubscribedChan has to possibly create and hand out a
	// notification channel in advance of any notification.
	notifierMap map[string]chan struct{}

	// subscribed is a set that keeps track of resourceURLs that are currently
	// subscribed to. Keys are never deleted. If a key is present in this map
	// it is also present in 'notifierMap'.
	subscribed map[string]struct{}
}

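// newResourceSubscriptionTracker returns an initialized tracker with no subscriptions.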
func newResourceSubscriptionTracker() *resourceSubscriptionTracker {
	return &resourceSubscriptionTracker{
		subscribed:  make(map[string]struct{}),
		notifierMap: make(map[string]chan struct{}),
	}
}

// IsSubscribed returns true if the given ResourceURL has an active subscription.
func (t *resourceSubscriptionTracker) IsSubscribed(resourceURL string) bool {
	_, ok := t.subscribed[resourceURL]
	return ok
}

// Subscribe subscribes to the given ResourceURL. It will return true if this
// was the FIRST time a subscription occurred. It will also close the
// notification channel associated with this ResourceURL.
func (t *resourceSubscriptionTracker) Subscribe(resourceURL string) bool {
	if _, ok := t.subscribed[resourceURL]; ok {
		return false
	}
	t.subscribed[resourceURL] = struct{}{}

	// and notify
	ch := t.ensureNotifierChan(resourceURL)
	close(ch)

	return true
}

// SubscribedChan returns a channel that will be closed when the ResourceURL is
// subscribed using the Subscribe method.
func (t *resourceSubscriptionTracker) SubscribedChan(resourceURL string) <-chan struct{} {
	return t.ensureNotifierChan(resourceURL)
}

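// ensureNotifierChan returns the notification channel for the given ResourceURL,
// creating and storing one if it does not exist yet.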
func (t *resourceSubscriptionTracker) ensureNotifierChan(resourceURL string) chan struct{} {
	if ch, ok := t.notifierMap[resourceURL]; ok {
		return ch
	}
	ch := make(chan struct{})
	t.notifierMap[resourceURL] = ch
	return ch
}