Merge branch 'main' into dev-portal

Tu Nguyen 2022-08-31 11:21:14 -07:00 committed by GitHub
commit 84d09cc2b9
97 changed files with 3368 additions and 1142 deletions

.changelog/11742.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
api: Add filtering support to Catalog's List Services (v1/catalog/services)
```
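
As a rough illustration, the new filter can be exercised from the Go API client by setting `QueryOptions.Filter` (a minimal sketch assuming the public `github.com/hashicorp/consul/api` package and a local agent; the expression mirrors the one used in the RPC tests below):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to a local Consul agent with default settings.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Filter expressions are evaluated server-side against each ServiceNode,
	// so only matching services (and their tags) come back.
	services, _, err := client.Catalog().Services(&api.QueryOptions{
		Filter: "ServiceName == redis",
	})
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags)
	}
}
```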

.changelog/14364.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bugfix
peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state.
```

.changelog/14373.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
```
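
In go-control-plane terms, this amounts to setting `max_ejection_percent` on the cluster's outlier detection config. A hedged sketch (not Consul's actual xDS generation code) assuming the `envoyproxy/go-control-plane` cluster v3 types:

```go
package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	od := &clusterv3.OutlierDetection{
		// 100% lets Envoy eject every endpoint of a peered upstream,
		// so traffic can fail over even when all instances look unhealthy.
		MaxEjectionPercent: wrapperspb.UInt32(100),
	}
	fmt.Println(od)
}
```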

.changelog/14378.txt (new file)

@@ -0,0 +1,5 @@
```release-note:bug
api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
`QueryFailoverOptions` and marks it as deprecated.
```
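
Restoring the old name as a deprecated alias is a one-line type alias in Go. A self-contained sketch of the pattern (the `Datacenters` field and doc comments here are stand-ins, not the api package's exact definitions):

```go
package main

import "fmt"

// QueryFailoverOptions stands in for the renamed struct in the api package.
type QueryFailoverOptions struct {
	Datacenters []string
}

// QueryDatacenterOptions is kept so existing callers continue to compile.
//
// Deprecated: Use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions

func main() {
	var opts QueryDatacenterOptions // the old name still works
	opts.Datacenters = []string{"dc2"}
	fmt.Println(opts)
}
```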

.changelog/14396.txt (new file)

@@ -0,0 +1,3 @@
```release-note:feature
peering: Add support to failover to services running on cluster peers.
```
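
In the style of the discovery-chain tests further down, a failover to a cluster peer might be configured on a service-resolver roughly like this (a sketch; the `Failover`/`Targets` shape with a `Peer` field is assumed from the feature description rather than taken from this diff):

```go
entries.AddResolvers(
	&structs.ServiceResolverConfigEntry{
		Kind: "service-resolver",
		Name: "main",
		Failover: map[string]structs.ServiceResolverFailover{
			// "*" covers all subsets: if local instances are unhealthy,
			// fail over to the service exported by peer cluster-01.
			"*": {
				Targets: []structs.ServiceResolverFailoverTarget{
					{Peer: "cluster-01"},
				},
			},
		},
	},
)
```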

.changelog/_2271.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
snapshot agent: **(Enterprise only)** Add support for path-based addressing when using an S3 backend.
```


@@ -816,7 +816,7 @@ jobs:
# Get go binary from workspace
- attach_workspace:
at: .
# Build the consul-dev image from the already built binary
# Build the consul:local image from the already built binary
- run:
command: |
sudo rm -rf /usr/local/go
@@ -887,8 +887,8 @@ jobs:
- attach_workspace:
at: .
- run: *install-gotestsum
# Build the consul-dev image from the already built binary
- run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
# Build the consul:local image from the already built binary
- run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
- run:
name: Envoy Integration Tests
command: |
@@ -902,6 +902,7 @@ jobs:
GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
GOTESTSUM_FORMAT: standard-verbose
COMPOSE_INTERACTIVE_NO_CLI: 1
LAMBDA_TESTS_ENABLED: "true"
# tput complains if this isn't set to something.
TERM: ansi
- store_artifacts:


@@ -16,7 +16,7 @@ jobs:
backport:
if: github.event.pull_request.merged
runs-on: ubuntu-latest
container: hashicorpdev/backport-assistant:0.2.3
container: hashicorpdev/backport-assistant:0.2.5
steps:
- name: Run Backport Assistant for stable-website
run: |
@@ -24,6 +24,7 @@ jobs:
env:
BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
BACKPORT_TARGET_TEMPLATE: "stable-website"
BACKPORT_MERGE_COMMIT: true
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Backport changes to latest release branch
run: |


@@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
GOTAGS ?=
GOPATH=$(shell go env GOPATH)
GOARCH?=$(shell go env GOARCH)
MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)
export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@@ -129,7 +130,7 @@ export GOLDFLAGS
# Allow skipping docker build during integration tests in CI since we already
# have a built binary
ENVOY_INTEG_DEPS?=dev-docker
ENVOY_INTEG_DEPS?=docker-envoy-integ
ifdef SKIP_DOCKER_BUILD
ENVOY_INTEG_DEPS=noop
endif
@@ -152,7 +153,28 @@ dev-docker: linux
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
# 'consul:local' tag is needed to run the integration tests
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
@docker buildx use default && docker buildx build -t 'consul:local' \
--platform linux/$(GOARCH) \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--load \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
check-remote-dev-image-env:
ifndef REMOTE_DEV_IMAGE
$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
endif
remote-docker: check-remote-dev-image-env
$(MAKE) GOARCH=amd64 linux
$(MAKE) GOARCH=arm64 linux
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
--platform linux/amd64,linux/arm64 \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--push \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
@@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
endif
# linux builds a linux binary independent of the source platform
# linux builds a linux binary compatible with the source platform
linux:
@mkdir -p ./pkg/bin/linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
@mkdir -p ./pkg/bin/linux_$(GOARCH)
CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
# dist builds binaries for all platforms and packages them for distribution
dist:
@@ -324,8 +346,22 @@ consul-docker: go-build-image
ui-docker: ui-build-image
@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui
# Build image used to run integration tests locally.
docker-envoy-integ:
$(MAKE) GOARCH=amd64 linux
docker build \
--platform linux/amd64 $(NOCACHE) $(QUIET) \
-t 'consul:local' \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
$(CURDIR)/pkg/bin/linux_amd64 \
-f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
# Run integration tests.
# Use GO_TEST_FLAGS to run specific tests:
# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"
# NOTE: Always uses amd64 images, even when running on M1 Macs, to match the CI/CD environment.
test-envoy-integ: $(ENVOY_INTEG_DEPS)
@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy
.PHONY: test-compat-integ
test-compat-integ: dev-docker


@@ -565,6 +565,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
return err
}
filter, err := bexpr.CreateFilter(args.Filter, nil, []*structs.ServiceNode{})
if err != nil {
return err
}
// Set reply enterprise metadata after resolving and validating the token so
// that we can properly infer metadata from the token.
reply.EnterpriseMeta = args.EnterpriseMeta
@@ -574,10 +579,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var err error
var serviceNodes structs.ServiceNodes
if len(args.NodeMetaFilters) > 0 {
reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
reply.Index, serviceNodes, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
} else {
reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
reply.Index, serviceNodes, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
}
if err != nil {
return err
@@ -588,11 +594,43 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
return nil
}
raw, err := filter.Execute(serviceNodes)
if err != nil {
return err
}
reply.Services = servicesTagsByName(raw.(structs.ServiceNodes))
c.srv.filterACLWithAuthorizer(authz, reply)
return nil
})
}
func servicesTagsByName(services []*structs.ServiceNode) structs.Services {
unique := make(map[string]map[string]struct{})
for _, svc := range services {
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return results
}
// ServiceList is used to query the services in a DC.
// Returns services as a list of ServiceNames.
func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error {


@@ -1523,6 +1523,45 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) {
}
}
func TestCatalog_ListServices_Filter(t *testing.T) {
t.Parallel()
_, s1 := testServer(t)
codec := rpcClient(t, s1)
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
// prep the cluster with some data we can use in our filters
registerTestCatalogEntries(t, codec)
// Run the tests against the test server
t.Run("ListServices", func(t *testing.T) {
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
args.Filter = "ServiceName == redis"
out := new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Contains(t, out.Services, "redis")
require.ElementsMatch(t, []string{"v1", "v2"}, out.Services["redis"])
args.Filter = "NodeMeta.os == NoSuchOS"
out = new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Len(t, out.Services, 0)
args.Filter = "NodeMeta.NoSuchMetadata == linux"
out = new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Len(t, out.Services, 0)
args.Filter = "InvalidField == linux"
out = new(structs.IndexedServices)
require.Error(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
})
}
func TestCatalog_ListServices_Blocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")


@@ -39,6 +39,7 @@ func TestCompile(t *testing.T) {
"service redirect": testcase_ServiceRedirect(),
"service and subset redirect": testcase_ServiceAndSubsetRedirect(),
"datacenter redirect": testcase_DatacenterRedirect(),
"redirect to cluster peer": testcase_PeerRedirect(),
"datacenter redirect with mesh gateways": testcase_DatacenterRedirect_WithMeshGateways(),
"service failover": testcase_ServiceFailover(),
"service failover through redirect": testcase_ServiceFailoverThroughRedirect(),
@@ -1084,6 +1085,47 @@ func testcase_DatacenterRedirect() compileTestCase {
return compileTestCase{entries: entries, expect: expect}
}
func testcase_PeerRedirect() compileTestCase {
entries := newEntries()
entries.AddResolvers(
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
Peer: "cluster-01",
},
},
)
expect := &structs.CompiledDiscoveryChain{
Protocol: "tcp",
StartNode: "resolver:other.default.default.external.cluster-01",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:other.default.default.external.cluster-01": {
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "other.default.default.external.cluster-01",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "other.default.default.external.cluster-01",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
Peer: "cluster-01",
}, func(t *structs.DiscoveryTarget) {
t.SNI = ""
t.Name = ""
t.Datacenter = ""
}),
},
}
return compileTestCase{entries: entries, expect: expect}
}
func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
entries := newEntries()
entries.AddProxyDefaults(&structs.ProxyConfigEntry{


@@ -112,7 +112,7 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
if status.NeverConnected {
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
} else {
healthy := status.IsHealthy()
healthy := s.peerStreamServer.Tracker.IsHealthy(status)
healthyInt := 0
if healthy {
healthyInt = 1
@@ -295,13 +295,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
}
// Create a ring buffer to cycle through peer addresses in the retry loop below.
buffer := ring.New(len(peer.PeerServerAddresses))
for _, addr := range peer.PeerServerAddresses {
buffer.Value = addr
buffer = buffer.Next()
}
secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
if err != nil {
return fmt.Errorf("failed to read secret for peering: %w", err)
@@ -312,27 +305,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
logger.Trace("establishing stream to peer")
retryCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
streamStatus, err := s.peerStreamTracker.Register(peer.ID)
streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}
streamCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
// Start a goroutine to watch for updates to peer server addresses.
// The latest valid server address can be received from nextServerAddr.
nextServerAddr := make(chan string)
go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr)
// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
go retryLoopBackoffPeering(retryCtx, logger, func() error {
go retryLoopBackoffPeering(streamCtx, logger, func() error {
// Try a new address on each iteration by advancing the ring buffer on errors.
defer func() {
buffer = buffer.Next()
}()
addr, ok := buffer.Value.(string)
if !ok {
return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
}
addr := <-nextServerAddr
logger.Trace("dialing peer", "addr", addr)
conn, err := grpc.DialContext(retryCtx, addr,
conn, err := grpc.DialContext(streamCtx, addr,
// TODO(peering): use a grpc.WithStatsHandler here?)
tlsOption,
// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
@@ -349,7 +341,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
defer conn.Close()
client := pbpeerstream.NewPeerStreamServiceClient(conn)
stream, err := client.StreamResources(retryCtx)
stream, err := client.StreamResources(streamCtx)
if err != nil {
return err
}
@@ -397,6 +389,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
return nil
}
// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr.
// It loads the server addresses into a ring buffer and cycles through them until:
// 1. streamCtx is cancelled (peer is deleted)
// 2. the peer is modified and the watchset fires.
//
// In case (2) we refetch the peering and rebuild the ring buffer.
func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) {
defer close(nextServerAddr)
// we initialize the ring buffer with the peer passed to `establishStream`
// because the caller has pre-checked `peer.ShouldDial`, guaranteeing
// at least one server address.
//
// IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block.
ringbuf := ring.New(len(peer.PeerServerAddresses))
for _, addr := range peer.PeerServerAddresses {
ringbuf.Value = addr
ringbuf = ringbuf.Next()
}
innerWs := memdb.NewWatchSet()
_, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
if err != nil {
s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.",
"peer_id", peer.ID,
"error", err)
}
fetchAddrs := func() error {
// reinstantiate innerWs to prevent it from growing indefinitely
innerWs = memdb.NewWatchSet()
_, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
if err != nil {
return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err)
}
if !peering.IsActive() {
return fmt.Errorf("peer %q is no longer active", peer.ID)
}
if len(peering.PeerServerAddresses) == 0 {
return fmt.Errorf("peer %q has no addresses to dial", peer.ID)
}
ringbuf = ring.New(len(peering.PeerServerAddresses))
for _, addr := range peering.PeerServerAddresses {
ringbuf.Value = addr
ringbuf = ringbuf.Next()
}
return nil
}
for {
select {
case nextServerAddr <- ringbuf.Value.(string):
ringbuf = ringbuf.Next()
case err := <-innerWs.WatchCh(ctx):
if err != nil {
// context was cancelled
return
}
// watch fired so we refetch the peering and rebuild the ring buffer
if err := fetchAddrs(); err != nil {
s.logger.Warn("watchset for peer was fired but failed to update server addresses",
"peer_id", peer.ID,
"error", err)
}
}
}
}
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}


@@ -18,6 +18,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
@@ -25,6 +26,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
@@ -38,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
})
}
func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@@ -135,9 +138,11 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
// Delete the peering to trigger the termination sequence.
deleted := &pbpeering.Peering{
ID: p.Peering.ID,
Name: "my-peer-acceptor",
DeletedAt: structs.TimeToProto(time.Now()),
ID: p.Peering.ID,
Name: "my-peer-acceptor",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: p.Peering.PeerServerAddresses,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
dialer.logger.Trace("deleted peering for my-peer-acceptor")
@@ -260,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
deleted := &pbpeering.Peering{
ID: p.Peering.PeerID,
Name: "my-peer-dialer",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}
@@ -429,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@@ -1163,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@@ -1214,7 +1222,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
}))
require.Never(t, func() bool {
_, found := s1.peerStreamTracker.StreamStatus(peerID)
_, found := s1.peerStreamServer.StreamStatus(peerID)
return found
}, 7*time.Second, 1*time.Second, "peering should not have been established")
}
@@ -1375,3 +1383,138 @@ func Test_isFailedPreconditionErr(t *testing.T) {
werr := fmt.Errorf("wrapped: %w", err)
assert.True(t, isFailedPreconditionErr(werr))
}
func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
// We want 1s retries for this test
orig := maxRetryBackoff
maxRetryBackoff = 1
t.Cleanup(func() { maxRetryBackoff = orig })
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
// Create a peering by generating a token
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
acceptorClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-dialer",
}
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
_, dialer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
})
testrpc.WaitForLeader(t, dialer.RPC, "dc2")
// Create a peering at dialer by establishing a peering with acceptor's token
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
dialerClient := pbpeering.NewPeeringServiceClient(conn)
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-acceptor",
PeeringToken: resp.PeeringToken,
}
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
require.True(r, status.Connected)
})
testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// generate a new token from the acceptor
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-dialer",
}
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
require.NoError(t, err)
// we will update the token with bad addresses to assert it doesn't clobber existing ones
token.ServerAddresses = []string{"1.2.3.4:1234"}
badToken, err := acceptor.peeringBackend.EncodeToken(token)
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// Try establishing.
// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-acceptor",
PeeringToken: string(badToken),
}
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
})
testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
// force close the acceptor's gRPC server so the dialer retries with a new address.
acceptor.externalGRPCServer.Stop()
clone := proto.Clone(p.Peering)
updated := clone.(*pbpeering.Peering)
// start with a bad address so we can assert for a specific error
updated.PeerServerAddresses = append([]string{
"bad",
}, p.Peering.PeerServerAddresses...)
// this write will wake up the watch on the leader to refetch server addresses
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))
retry.Run(t, func(r *retry.R) {
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
// We assert that this error is set, which would indicate that we iterated
// through a bad address.
require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address")
require.False(r, status.Connected)
})
})
}


@@ -370,9 +370,9 @@ type Server struct {
// peerStreamServer is a server used to handle peering streams from external clusters.
peerStreamServer *peerstream.Server
// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
peeringServer *peering.Server
peerStreamTracker *peerstream.Tracker
peeringServer *peering.Server
// embedded struct to hold all the enterprise specific data
EnterpriseServer
@@ -724,11 +724,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
Logger: logger.Named("grpc-api.server-discovery"),
}).Register(s.externalGRPCServer)
s.peerStreamTracker = peerstream.NewTracker()
s.peeringBackend = NewPeeringBackend(s)
s.peerStreamServer = peerstream.NewServer(peerstream.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
GetStore: func() peerstream.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.peerstream"),
ACLResolver: s.ACLResolver,
@@ -742,7 +740,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
return s.ForwardGRPC(s.grpcConnPool, info, fn)
},
})
s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout)
s.peerStreamServer.Register(s.externalGRPCServer)
// Initialize internal gRPC server.
@@ -791,7 +788,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
p := peering.NewServer(peering.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
Tracker: s.peerStreamServer.Tracker,
Logger: deps.Logger.Named("grpc-api.peering"),
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
// Only forward the request if the dc in the request matches the server's datacenter.
@@ -1575,12 +1572,12 @@ func (s *Server) Stats() map[string]map[string]string {
// GetLANCoordinate returns the coordinate of the node in the LAN gossip
// pool.
//
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
//
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
//
// NOTE: servers do not emit coordinates for partitioned gossip pools they
// are ancillary members of.


@@ -1134,7 +1134,7 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool,
}
// Services returns all services along with a list of associated tags.
func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
tx := s.db.Txn(false)
defer tx.Abort()
@@ -1148,30 +1148,11 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerNam
}
ws.Add(services.WatchCh())
// Rip through the services and enumerate them and their unique set of
// tags.
unique := make(map[string]map[string]struct{})
var result []*structs.ServiceNode
for service := services.Next(); service != nil; service = services.Next() {
svc := service.(*structs.ServiceNode)
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
result = append(result, service.(*structs.ServiceNode))
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return idx, results, nil
return idx, result, nil
}
func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) {
@@ -1212,7 +1193,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta,
}
// ServicesByNodeMeta returns all services, filtered by the given node metadata.
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
tx := s.db.Txn(false)
defer tx.Abort()
@@ -1259,8 +1240,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
}
allServicesCh := allServices.WatchCh()
// Populate the services map
unique := make(map[string]map[string]struct{})
var result structs.ServiceNodes
for node := nodes.Next(); node != nil; node = nodes.Next() {
n := node.(*structs.Node)
if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) {
@@ -1274,30 +1254,11 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
}
ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh)
// Rip through the services and enumerate them and their unique set of
// tags.
for service := services.Next(); service != nil; service = services.Next() {
svc := service.(*structs.ServiceNode)
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
result = append(result, service.(*structs.ServiceNode))
}
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return idx, results, nil
return idx, result, nil
}
// maxIndexForService return the maximum Raft Index for a service


@@ -12,6 +12,8 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/assert"
@@ -2105,10 +2107,13 @@ func TestStateStore_Services(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns1.EnterpriseMeta.Normalize()
if err := s.EnsureService(2, "node1", ns1); err != nil {
t.Fatalf("err: %s", err)
}
testRegisterService(t, s, 3, "node1", "dogs")
ns1Dogs := testRegisterService(t, s, 3, "node1", "dogs")
ns1Dogs.EnterpriseMeta.Normalize()
testRegisterNode(t, s, 4, "node2")
ns2 := &structs.NodeService{
ID: "service3",
@@ -2117,6 +2122,7 @@ func TestStateStore_Services(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns2.EnterpriseMeta.Normalize()
if err := s.EnsureService(5, "node2", ns2); err != nil {
t.Fatalf("err: %s", err)
}
@@ -2134,19 +2140,13 @@ func TestStateStore_Services(t *testing.T) {
t.Fatalf("bad index: %d", idx)
}
// Verify the result. We sort the lists since the order is
// non-deterministic (it's built using a map internally).
expected := structs.Services{
"redis": []string{"prod", "primary", "replica"},
"dogs": []string{},
}
sort.Strings(expected["redis"])
for _, tags := range services {
sort.Strings(tags)
}
if !reflect.DeepEqual(expected, services) {
t.Fatalf("bad: %#v", services)
// Verify the result.
expected := []*structs.ServiceNode{
ns1Dogs.ToServiceNode("node1"),
ns1.ToServiceNode("node1"),
ns2.ToServiceNode("node2"),
}
assertDeepEqual(t, expected, services, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
// Deleting a node with a service should fire the watch.
if err := s.DeleteNode(6, "node1", nil, ""); err != nil {
@@ -2185,6 +2185,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns1.EnterpriseMeta.Normalize()
if err := s.EnsureService(2, "node0", ns1); err != nil {
t.Fatalf("err: %s", err)
}
@@ -2195,6 +2196,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns2.EnterpriseMeta.Normalize()
if err := s.EnsureService(3, "node1", ns2); err != nil {
t.Fatalf("err: %s", err)
}
@@ -2209,11 +2211,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{
"redis": []string{"primary", "prod"},
expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
}
sort.Strings(res["redis"])
require.Equal(t, expected, res)
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Get all services using the common meta value", func(t *testing.T) {
@@ -2221,11 +2222,12 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{
"redis": []string{"primary", "prod", "replica"},
require.Len(t, res, 2)
expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
ns2.ToServiceNode("node1"),
}
sort.Strings(res["redis"])
require.Equal(t, expected, res)
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Get an empty list for an invalid meta value", func(t *testing.T) {
@@ -2233,8 +2235,8 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{}
require.Equal(t, expected, res)
var expected []*structs.ServiceNode
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) {
@@ -2242,11 +2244,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{
"redis": []string{"primary", "prod"},
expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
}
sort.Strings(res["redis"])
require.Equal(t, expected, res)
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) {
@@ -8810,3 +8811,10 @@ func setVirtualIPFlags(t *testing.T, s *Store) {
Value: "true",
}))
}
func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
t.Helper()
if diff := cmp.Diff(x, y, opts...); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}


@@ -7,12 +7,13 @@ import (
"strings"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/maps"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/go-memdb"
)
const (
@@ -534,6 +535,12 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
if req.Peering.Name == "" {
return errors.New("Missing Peering Name")
}
if req.Peering.State == pbpeering.PeeringState_DELETING && (req.Peering.DeletedAt == nil || structs.IsZeroProtoTime(req.Peering.DeletedAt)) {
return errors.New("Missing deletion time for peering in deleting state")
}
if req.Peering.DeletedAt != nil && !structs.IsZeroProtoTime(req.Peering.DeletedAt) && req.Peering.State != pbpeering.PeeringState_DELETING {
return fmt.Errorf("Unexpected state for peering with deletion time: %s", pbpeering.PeeringStateToAPI(req.Peering.State))
}
// Ensure the name is unique (cannot conflict with another peering with a different ID).
_, existing, err := peeringReadTxn(tx, nil, Query{
@@ -545,11 +552,32 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
}
if existing != nil {
if req.Peering.ShouldDial() != existing.ShouldDial() {
return fmt.Errorf("Cannot switch peering dialing mode from %t to %t", existing.ShouldDial(), req.Peering.ShouldDial())
}
if req.Peering.ID != existing.ID {
return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID)
}
// Nothing to do if our peer wants to terminate the peering but the peering is already marked for deletion.
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}
// No-op deletion
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_DELETING {
return nil
}
// No-op termination
if existing.State == pbpeering.PeeringState_TERMINATED && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}
// Prevent modifications to Peering marked for deletion.
if !existing.IsActive() {
// This blocks generating new peering tokens or re-establishing the peering until the peering is done deleting.
if existing.State == pbpeering.PeeringState_DELETING {
return fmt.Errorf("cannot write to peering that is marked for deletion")
}
@@ -581,8 +609,8 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
req.Peering.ModifyIndex = idx
}
// Ensure associated secrets are cleaned up when a peering is marked for deletion.
if req.Peering.State == pbpeering.PeeringState_DELETING {
// Ensure associated secrets are cleaned up when a peering is marked for deletion or terminated.
if !req.Peering.IsActive() {
if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil {
return fmt.Errorf("failed to delete peering secrets: %w", err)
}
@@ -981,7 +1009,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en
if idx > maxIdx {
maxIdx = idx
}
if peering == nil || !peering.IsActive() {
if !peering.IsActive() {
continue
}
peerings = append(peerings, peering)


@@ -950,6 +950,7 @@ func TestStore_Peering_Watch(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
})
@@ -976,6 +977,7 @@ func TestStore_Peering_Watch(t *testing.T) {
err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{
ID: testBarPeerID,
Name: "bar",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
})
@@ -1077,6 +1079,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
@@ -1112,16 +1115,22 @@ func TestStore_PeeringWrite(t *testing.T) {
// Each case depends on the previous.
s := NewStateStore(nil)
testTime := time.Now()
type expectations struct {
peering *pbpeering.Peering
secrets *pbpeering.PeeringSecrets
err string
}
type testcase struct {
name string
input *pbpeering.PeeringWriteRequest
expectSecrets *pbpeering.PeeringSecrets
expectErr string
name string
input *pbpeering.PeeringWriteRequest
expect expectations
}
run := func(t *testing.T, tc testcase) {
err := s.PeeringWrite(10, tc.input)
if tc.expectErr != "" {
testutil.RequireErrorContains(t, err, tc.expectErr)
if tc.expect.err != "" {
testutil.RequireErrorContains(t, err, tc.expect.err)
return
}
require.NoError(t, err)
@@ -1133,52 +1142,176 @@ func TestStore_PeeringWrite(t *testing.T) {
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.NotNil(t, p)
require.Equal(t, tc.input.Peering.State, p.State)
require.Equal(t, tc.input.Peering.Name, p.Name)
require.Equal(t, tc.expect.peering.State, p.State)
require.Equal(t, tc.expect.peering.Name, p.Name)
require.Equal(t, tc.expect.peering.Meta, p.Meta)
if tc.expect.peering.DeletedAt != nil {
require.Equal(t, tc.expect.peering.DeletedAt, p.DeletedAt)
}
secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID)
require.NoError(t, err)
prototest.AssertDeepEqual(t, tc.expectSecrets, secrets)
prototest.AssertDeepEqual(t, tc.expect.secrets, secrets)
}
tcs := []testcase{
{
name: "create baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_ESTABLISHING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: testBazPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testBazSecretID,
Request: &pbpeering.SecretsWriteRequest_Establish{
Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
ActiveStreamSecret: testBazSecretID,
},
},
},
},
expectSecrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_ESTABLISHING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "cannot change ID for baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "123",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
err: `A peering already exists with the name "baz" and a different ID`,
},
},
{
name: "cannot change dialer status for baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "123",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
// Excluding the peer server addresses leads to baz not being considered a dialer.
// PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
err: "Cannot switch peering dialing mode from true to false",
},
},
{
name: "update baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectSecrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "if no state was included in request it is inherited from existing",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Send undefined state.
// State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Previous failing state is picked up.
State: pbpeering.PeeringState_FAILING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "mark baz as terminated",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "cannot modify peering during no-op termination",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
PeerServerAddresses: []string{"localhost:8502"},
// Attempt to add metadata
Meta: map[string]string{"foo": "bar"},
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
// Meta should be unchanged.
Meta: nil,
},
},
},
@@ -1186,42 +1319,104 @@ func TestStore_PeeringWrite(t *testing.T) {
name: "mark baz for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: []string{"localhost:8502"},
DeletedAt: structs.TimeToProto(testTime),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
DeletedAt: structs.TimeToProto(testTime),
},
secrets: nil,
},
},
{
name: "deleting a deleted peering is a no-op",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: []string{"localhost:8502"},
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
// Secrets for baz should have been deleted
expectSecrets: nil,
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Still marked as deleting at the original testTime
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(testTime),
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "terminating a peering marked for deletion is a no-op",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Still marked as deleting
State: pbpeering.PeeringState_DELETING,
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "cannot update peering marked for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
ID: testBazPeerID,
Name: "baz",
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
// Attempt to add metadata
Meta: map[string]string{
"source": "kubernetes",
},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectErr: "cannot write to peering that is marked for deletion",
expect: expectations{
err: "cannot write to peering that is marked for deletion",
},
},
{
name: "cannot create peering marked for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: testFooPeerID,
Name: "foo",
PeerServerAddresses: []string{"localhost:8502"},
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectErr: "cannot create a new peering marked for deletion",
expect: expectations{
err: "cannot create a new peering marked for deletion",
},
},
}
for _, tc := range tcs {
@@ -1246,6 +1441,7 @@ func TestStore_PeeringDelete(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@@ -1759,6 +1955,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
copied := pbpeering.Peering{
ID: tp.peering.ID,
Name: tp.peering.Name,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied}))
@@ -2201,6 +2398,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID1,
Name: "peer1",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))


@@ -146,13 +146,13 @@ func testRegisterServiceOpts(t *testing.T, s *Store, idx uint64, nodeID, service
// testRegisterServiceWithChange registers a service and allows ensuring the consul index is updated
// even if the service already exists, when using `modifyAccordingIndex`.
// This is done by setting the transaction ID in the "version" meta so the service will be updated if it already exists
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) *structs.NodeService {
return testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
}
// testRegisterServiceWithChangeOpts is the same as testRegisterServiceWithChange with the addition of opts that can
// modify the service prior to writing.
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) {
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) *structs.NodeService {
meta := make(map[string]string)
if modifyAccordingIndex {
meta["version"] = fmt.Sprint(idx)
@@ -183,14 +183,15 @@ func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeI
result.ServiceID != serviceID {
t.Fatalf("bad service: %#v", result)
}
return svc
}
// testRegisterService registers a service with the given transaction idx.
// If the service already exists, the transaction number might not be increased.
// Use `testRegisterServiceWithChange()` if you want to perform a registration that
// ensures the transaction is updated by setting idx in the service's Meta
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) *structs.NodeService {
return testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
}
func testRegisterConnectService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {


@@ -41,8 +41,8 @@ var Gauges = []prometheus.GaugeDefinition{
Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.",
},
{
Name: []string{"consul", "kv", "entries"},
Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.10.3.",
Name: []string{"consul", "state", "kv_entries"},
Help: "Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.",
},
{
Name: []string{"consul", "state", "connect_instances"},


@@ -26,11 +26,12 @@ const (
type Server struct {
Config
Tracker *Tracker
}
type Config struct {
Backend Backend
Tracker *Tracker
GetStore func() StateStore
Logger hclog.Logger
ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)
@@ -42,8 +43,8 @@ type Config struct {
// outgoingHeartbeatInterval is how often we send a heartbeat.
outgoingHeartbeatInterval time.Duration
// IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
IncomingHeartbeatTimeout time.Duration
// incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
incomingHeartbeatTimeout time.Duration
}
//go:generate mockery --name ACLResolver --inpackage
@@ -53,7 +54,6 @@ type ACLResolver interface {
func NewServer(cfg Config) *Server {
requireNotNil(cfg.Backend, "Backend")
requireNotNil(cfg.Tracker, "Tracker")
requireNotNil(cfg.GetStore, "GetStore")
requireNotNil(cfg.Logger, "Logger")
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
@@ -63,11 +63,12 @@ func NewServer(cfg Config) *Server {
if cfg.outgoingHeartbeatInterval == 0 {
cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
}
if cfg.IncomingHeartbeatTimeout == 0 {
cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
if cfg.incomingHeartbeatTimeout == 0 {
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Server{
Config: cfg,
Config: cfg,
Tracker: NewTracker(cfg.incomingHeartbeatTimeout),
}
}


@@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// incomingHeartbeatCtx will complete if incoming heartbeats time out.
incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
// value, not the current value.
@@ -575,6 +575,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
status.TrackRecvResourceSuccess()
}
// We reply with an ACK or NACK depending on whether we successfully processed the response.
if err := streamSend(reply); err != nil {
return fmt.Errorf("failed to send to stream: %v", err)
}
@@ -605,7 +606,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// They just can't trace the execution properly for some reason (possibly golang/go#29587).
//nolint:govet
incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
}
case update := <-subCh:
@@ -642,7 +643,6 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
if err := streamSend(replResp); err != nil {
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
}
status.TrackSendSuccess()
}
}
}


@@ -499,9 +499,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@@ -552,9 +551,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
// Set the initial roots and CA configuration.
_, rootA := writeInitialRootsAndCA(t, store)
@@ -572,7 +570,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
})
})
var lastSendAck, lastSendSuccess time.Time
var lastSendAck time.Time
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
ack := &pbpeerstream.ReplicationMessage{
@@ -587,16 +585,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
},
}
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC)
lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC)
lastSendAck = it.FutureNow(1)
err := client.Send(ack)
require.NoError(t, err)
expect := Status{
Connected: true,
LastAck: lastSendAck,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
Connected: true,
LastAck: lastSendAck,
}
retry.Run(t, func(r *retry.R) {
@ -624,20 +619,17 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
},
}
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC)
lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC)
lastNack = it.FutureNow(1)
err := client.Send(nack)
require.NoError(t, err)
lastNackMsg = "client peer was unable to apply resource: bad bad not good"
expect := Status{
Connected: true,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
Connected: true,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
}
retry.Run(t, func(r *retry.R) {
@ -707,8 +699,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -770,8 +760,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -805,8 +793,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -839,8 +825,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -1142,9 +1126,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.IncomingHeartbeatTimeout = 5 * time.Millisecond
c.incomingHeartbeatTimeout = 5 * time.Millisecond
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1190,9 +1174,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) {
outgoingHeartbeatInterval := 5 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1249,9 +1233,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
incomingHeartbeatTimeout := 10 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout
c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -2760,7 +2744,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
store: store,
pub: publisher,
},
Tracker: NewTracker(),
GetStore: func() StateStore { return store },
Logger: testutil.Logger(t),
Datacenter: "dc1",

View File

@ -14,20 +14,27 @@ type Tracker struct {
mu sync.RWMutex
streams map[string]*MutableStatus
// heartbeatTimeout is the max duration a connection is allowed to be
// disconnected before the stream health is reported as non-healthy
heartbeatTimeout time.Duration
// timeNow is a shim for testing.
timeNow func() time.Time
heartbeatTimeout time.Duration
}
func NewTracker() *Tracker {
func NewTracker(heartbeatTimeout time.Duration) *Tracker {
if heartbeatTimeout == 0 {
heartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Tracker{
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
heartbeatTimeout: heartbeatTimeout,
}
}
func (t *Tracker) SetClock(clock func() time.Time) {
// setClock is used for debugging purposes only.
func (t *Tracker) setClock(clock func() time.Time) {
if clock == nil {
t.timeNow = time.Now
} else {
@ -35,12 +42,6 @@ func (t *Tracker) SetClock(clock func() time.Time) {
}
}
func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) {
t.mu.Lock()
defer t.mu.Unlock()
t.heartbeatTimeout = heartbeatTimeout
}
// Register a stream for a given peer but do not mark it as connected.
func (t *Tracker) Register(id string) (*MutableStatus, error) {
t.mu.Lock()
@ -52,7 +53,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) {
func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) {
status, ok := t.streams[id]
if !ok {
status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected)
status = newMutableStatus(t.timeNow, initAsConnected)
t.streams[id] = status
return status, true, nil
}
@ -136,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) {
delete(t.streams, id)
}
// IsHealthy calculates the health of a peering status.
// We define a peering as unhealthy if its status has been in any of the
// following states for longer than the configured incomingHeartbeatTimeout:
// - it is disconnected
// - the last received Nack is newer than the last received Ack
// - the last received error is newer than the last received success
//
// If none of these conditions apply, we call the peering healthy.
func (t *Tracker) IsHealthy(s Status) bool {
// If stream is in a disconnected state for longer than the configured
// heartbeat timeout, report as unhealthy.
if !s.DisconnectTime.IsZero() &&
t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout {
return false
}
// If last Nack is after last Ack, it means the peer is unable to
// handle our replication message.
if s.LastNack.After(s.LastAck) &&
t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout {
return false
}
// If last recv error is newer than last recv success, we were unable
// to handle the peer's replication message.
if s.LastRecvError.After(s.LastRecvResourceSuccess) &&
t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout {
return false
}
return true
}
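
A usage sketch for the new tracker-level health check, using only identifiers visible in this diff (it assumes an `fmt` import and would live alongside this package's tests):

```go
func ExampleTracker_IsHealthy() {
	tracker := NewTracker(defaultIncomingHeartbeatTimeout)

	st, err := tracker.Connected("63b60245-c475-426b-b314-4588d210859d")
	if err != nil {
		panic(err)
	}
	st.TrackRecvResourceSuccess()

	// Healthy: no negative signal has outlived the heartbeat timeout.
	fmt.Println(tracker.IsHealthy(st.GetStatus()))
	// Output: true
}
```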
type MutableStatus struct {
mu sync.RWMutex
@ -152,8 +186,6 @@ type MutableStatus struct {
// Status contains information about the replication stream to a peer cluster.
// TODO(peering): There's a lot of fields here...
type Status struct {
heartbeatTimeout time.Duration
// Connected is true when there is an open stream for the peer.
Connected bool
@ -182,9 +214,6 @@ type Status struct {
// LastSendErrorMessage tracks the last error message when sending into the stream.
LastSendErrorMessage string
// LastSendSuccess tracks the time of the last success response sent into the stream.
LastSendSuccess time.Time
// LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
LastRecvHeartbeat time.Time
@ -214,40 +243,11 @@ func (s *Status) GetExportedServicesCount() uint64 {
return uint64(len(s.ExportedServices))
}
// IsHealthy is a convenience func that returns true/false for a peering status.
// We define a peering as unhealthy if its status satisfies one of the following:
// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout
// - If the last sent error is newer than last sent success
// - If the last received error is newer than last received success
// If none of these conditions apply, we call the peering healthy.
func (s *Status) IsHealthy() bool {
if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout {
// 1. If heartbeat hasn't been received for a while - report unhealthy
return false
}
if s.LastSendError.After(s.LastSendSuccess) {
// 2. If last sent error is newer than last sent success - report unhealthy
return false
}
if s.LastRecvError.After(s.LastRecvResourceSuccess) {
// 3. If last recv error is newer than last recv success - report unhealthy
return false
}
return true
}
func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus {
if heartbeatTimeout.Microseconds() == 0 {
heartbeatTimeout = defaultIncomingHeartbeatTimeout
}
func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
return &MutableStatus{
Status: Status{
Connected: connected,
heartbeatTimeout: heartbeatTimeout,
NeverConnected: !connected,
Connected: connected,
NeverConnected: !connected,
},
timeNow: now,
doneCh: make(chan struct{}),
@ -271,12 +271,6 @@ func (s *MutableStatus) TrackSendError(error string) {
s.mu.Unlock()
}
func (s *MutableStatus) TrackSendSuccess() {
s.mu.Lock()
s.LastSendSuccess = s.timeNow().UTC()
s.mu.Unlock()
}
// TrackRecvResourceSuccess tracks receiving a replicated resource.
func (s *MutableStatus) TrackRecvResourceSuccess() {
s.mu.Lock()

View File

@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
@ -14,95 +15,107 @@ const (
aPeerID = "63b60245-c475-426b-b314-4588d210859d"
)
func TestStatus_IsHealthy(t *testing.T) {
func TestTracker_IsHealthy(t *testing.T) {
type testcase struct {
name string
dontConnect bool
modifierFunc func(status *MutableStatus)
expectedVal bool
heartbeatTimeout time.Duration
name string
tracker *Tracker
modifierFunc func(status *MutableStatus)
expectedVal bool
}
tcs := []testcase{
{
name: "never connected, unhealthy",
expectedVal: false,
dontConnect: true,
},
{
name: "no heartbeat, unhealthy",
expectedVal: false,
},
{
name: "heartbeat is not received, unhealthy",
expectedVal: false,
name: "disconnect time within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second)
},
heartbeatTimeout: 1 * time.Second,
},
{
name: "send error before send success",
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now()
status.LastSendSuccess = time.Now()
status.LastSendError = time.Now()
status.DisconnectTime = time.Now()
},
},
{
name: "received error before received success",
name: "disconnect time past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now()
status.LastRecvResourceSuccess = time.Now()
status.LastRecvError = time.Now()
status.DisconnectTime = time.Now().Add(-1 * time.Minute)
},
},
{
name: "receive error before receive success within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "receive error before receive success past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "nack before ack within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "nack before ack past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "healthy",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now()
},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
tracker := NewTracker()
if tc.heartbeatTimeout.Microseconds() != 0 {
tracker.SetHeartbeatTimeout(tc.heartbeatTimeout)
tracker := tc.tracker
st, err := tracker.Connected(aPeerID)
require.NoError(t, err)
require.True(t, st.Connected)
if tc.modifierFunc != nil {
tc.modifierFunc(st)
}
if !tc.dontConnect {
st, err := tracker.Connected(aPeerID)
require.NoError(t, err)
require.True(t, st.Connected)
if tc.modifierFunc != nil {
tc.modifierFunc(st)
}
require.Equal(t, tc.expectedVal, st.IsHealthy())
} else {
st, found := tracker.StreamStatus(aPeerID)
require.False(t, found)
require.Equal(t, tc.expectedVal, st.IsHealthy())
}
assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus()))
})
}
}
func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
peerID := "63b60245-c475-426b-b314-4588d210859d"
it := incrementalTime{
@ -120,8 +133,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
require.NoError(t, err)
expect := Status{
Connected: true,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: true,
}
status, ok := tracker.StreamStatus(peerID)
@ -147,9 +159,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC()
expect := Status{
Connected: true,
LastAck: lastSuccess,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: true,
LastAck: lastSuccess,
}
require.Equal(t, expect, status)
})
@ -159,10 +170,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
sequence++
expect := Status{
Connected: false,
DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
LastAck: lastSuccess,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: false,
DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
LastAck: lastSuccess,
}
status, ok := tracker.StreamStatus(peerID)
require.True(t, ok)
@ -174,9 +184,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
require.NoError(t, err)
expect := Status{
Connected: true,
LastAck: lastSuccess,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: true,
LastAck: lastSuccess,
// DisconnectTime gets cleared on re-connect.
}
@ -203,7 +212,7 @@ func TestTracker_connectedStreams(t *testing.T) {
}
run := func(t *testing.T, tc testCase) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
if tc.setup != nil {
tc.setup(t, tracker)
}

View File

@ -280,16 +280,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
}
snap.Roots = roots
case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
if resp.Bundle != nil {
snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
}
case u.CorrelationID == peeringTrustBundlesWatchID:
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
if !ok {
@ -369,6 +359,17 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
// Clean up data
//
peeredChainTargets := make(map[UpstreamID]struct{})
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
for _, target := range discoChain.Targets {
if target.Peer == "" {
continue
}
uid := NewUpstreamIDFromTargetID(target.ID)
peeredChainTargets[uid] = struct{}{}
}
}
validPeerNames := make(map[string]struct{})
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
@ -383,6 +384,11 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
validPeerNames[uid.Peer] = struct{}{}
return true
}
// Peered upstream came from a discovery chain target
if _, ok := peeredChainTargets[uid]; ok {
validPeerNames[uid.Peer] = struct{}{}
return true
}
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
return true
})
@ -463,8 +469,14 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
continue
}
if _, ok := seenUpstreams[uid]; !ok {
for _, cancelFn := range targets {
for targetID, cancelFn := range targets {
cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
delete(snap.ConnectProxy.WatchedUpstreams, uid)
}

View File

@ -5,7 +5,9 @@ import (
"fmt"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type handlerIngressGateway struct {
@ -66,6 +68,9 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot,
snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener)
snap.IngressGateway.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]()
snap.IngressGateway.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
snap.IngressGateway.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
return snap, nil
}
@ -152,6 +157,12 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
cancelUpstreamFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
cancelFn()

View File

@ -814,6 +814,18 @@ func (s *ConfigSnapshot) MeshConfigTLSOutgoing() *structs.MeshDirectionalTLSConf
return mesh.TLS.Outgoing
}
func (s *ConfigSnapshot) ToConfigSnapshotUpstreams() (*ConfigSnapshotUpstreams, error) {
switch s.Kind {
case structs.ServiceKindConnectProxy:
return &s.ConnectProxy.ConfigSnapshotUpstreams, nil
case structs.ServiceKindIngressGateway:
return &s.IngressGateway.ConfigSnapshotUpstreams, nil
default:
// This is a coherence check and should never fail
return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", s.Kind)
}
}
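
A sketch of the call pattern this helper enables, mirroring its use in handlerUpstreams later in this diff (`bundle` is a stand-in for a `*pbpeering.PeeringTrustBundle` the caller already holds):

```go
// Shared handling for connect-proxy and ingress-gateway snapshots:
upstreams, err := snap.ToConfigSnapshotUpstreams()
if err != nil {
	return err // snapshot kind carries no upstream state (e.g. mesh gateway)
}
upstreams.UpstreamPeerTrustBundles.Set("cluster-01", bundle)
```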
func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.PeeringServiceMeta {
nodes, _ := u.PeerUpstreamEndpoints.Get(uid)
if len(nodes) == 0 {

View File

@ -493,6 +493,11 @@ func TestState_WatchesAndUpdates(t *testing.T) {
Mode: structs.MeshGatewayModeNone,
},
},
structs.Upstream{
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "api-failover-to-peer",
LocalBindPort: 10007,
},
structs.Upstream{
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "api-dc2",
@ -552,6 +557,16 @@ func TestState_WatchesAndUpdates(t *testing.T) {
Mode: structs.MeshGatewayModeNone,
},
}),
fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
Name: "api-failover-to-peer",
EvaluateInDatacenter: "dc1",
EvaluateInNamespace: "default",
EvaluateInPartition: "default",
Datacenter: "dc1",
OverrideMeshGateway: structs.MeshGatewayConfig{
Mode: meshGatewayProxyConfigValue,
},
}),
fmt.Sprintf("discovery-chain:%s-dc2", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
Name: "api-dc2",
EvaluateInDatacenter: "dc1",
@ -639,6 +654,26 @@ func TestState_WatchesAndUpdates(t *testing.T) {
},
Err: nil,
},
{
CorrelationID: fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()),
Result: &structs.DiscoveryChainResponse{
Chain: discoverychain.TestCompileConfigEntries(t, "api-failover-to-peer", "default", "default", "dc1", "trustdomain.consul",
func(req *discoverychain.CompileRequest) {
req.OverrideMeshGateway.Mode = meshGatewayProxyConfigValue
}, &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "api-failover-to-peer",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Targets: []structs.ServiceResolverFailoverTarget{
{Peer: "cluster-01"},
},
},
},
}),
},
Err: nil,
},
},
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
require.True(t, snap.Valid())
@ -646,15 +681,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Equal(t, indexedRoots, snap.Roots)
require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
require.True(t, snap.ConnectProxy.IntentionsSet)
require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
require.True(t, snap.ConnectProxy.MeshConfigSet)
@ -667,6 +705,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true),
fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true),
fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true),
upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()): genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true),
fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc2"),
fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc1"),
},
@ -676,15 +715,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Equal(t, indexedRoots, snap.Roots)
require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
require.True(t, snap.ConnectProxy.IntentionsSet)
require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
},

View File

@ -280,6 +280,31 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes {
}
}
func TestUpstreamNodesPeerCluster01(t testing.T) structs.CheckServiceNodes {
peer := "cluster-01"
service := structs.TestNodeServiceWithNameInPeer(t, "web", peer)
return structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test1",
Node: "test1",
Address: "10.40.1.1",
PeerName: peer,
},
Service: service,
},
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test2",
Node: "test2",
Address: "10.40.1.2",
PeerName: peer,
},
Service: service,
},
}
}
func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
return structs.CheckServiceNodes{
structs.CheckServiceNode{

View File

@ -8,6 +8,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
func setupTestVariationConfigEntriesAndSnapshot(
@ -72,6 +73,24 @@ func setupTestVariationConfigEntriesAndSnapshot(
Nodes: TestGatewayNodesDC2(t),
},
})
case "failover-to-cluster-peer":
events = append(events, UpdateEvent{
CorrelationID: "peer-trust-bundle:cluster-01",
Result: &pbpeering.TrustBundleReadResponse{
Bundle: &pbpeering.PeeringTrustBundle{
PeerName: "peer1",
TrustDomain: "peer1.domain",
ExportedPartition: "peer1ap",
RootPEMs: []string{"peer1-root-1"},
},
},
})
events = append(events, UpdateEvent{
CorrelationID: "upstream-peer:db?peer=cluster-01",
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestUpstreamNodesPeerCluster01(t),
},
})
case "failover-through-double-remote-gateway-triggered":
events = append(events, UpdateEvent{
CorrelationID: "upstream-target:db.default.default.dc1:" + dbUID.String(),
@ -255,6 +274,21 @@ func setupTestVariationDiscoveryChain(
},
},
)
case "failover-to-cluster-peer":
entries = append(entries,
&structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "db",
ConnectTimeout: 33 * time.Second,
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Targets: []structs.ServiceResolverFailoverTarget{
{Peer: "cluster-01"},
},
},
},
},
)
case "failover-through-double-remote-gateway-triggered":
fallthrough
case "failover-through-double-remote-gateway":

View File

@ -9,7 +9,9 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/acl"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type handlerUpstreams struct {
@ -21,9 +23,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
return fmt.Errorf("error filling agent cache: %v", u.Err)
}
upstreamsSnapshot := &snap.ConnectProxy.ConfigSnapshotUpstreams
if snap.Kind == structs.ServiceKindIngressGateway {
upstreamsSnapshot = &snap.IngressGateway.ConfigSnapshotUpstreams
upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams()
if err != nil {
return err
}
switch {
@ -98,19 +101,16 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
uid := UpstreamIDFromString(uidString)
filteredNodes := hostnameEndpoints(
s.logger,
GatewayKey{ /*empty so it never matches*/ },
resp.Nodes,
)
if len(filteredNodes) > 0 {
if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
}
} else {
if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, resp.Nodes); set {
delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
}
s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes)
case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
if resp.Bundle != nil {
upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
}
case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
@ -216,6 +216,23 @@ func removeColonPrefix(s string) (string, string, bool) {
return s[0:idx], s[idx+1:], true
}
func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) {
filteredNodes := hostnameEndpoints(
s.logger,
GatewayKey{ /*empty so it never matches*/ },
nodes,
)
if len(filteredNodes) > 0 {
if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
}
} else {
if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set {
delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
}
}
}
func (s *handlerUpstreams) resetWatchesFromChain(
ctx context.Context,
uid UpstreamID,
@ -255,6 +272,12 @@ func (s *handlerUpstreams) resetWatchesFromChain(
delete(snap.WatchedUpstreams[uid], targetID)
delete(snap.WatchedUpstreamEndpoints[uid], targetID)
cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
var (
@ -274,6 +297,7 @@ func (s *handlerUpstreams) resetWatchesFromChain(
service: target.Service,
filter: target.Subset.Filter,
datacenter: target.Datacenter,
peer: target.Peer,
entMeta: target.GetEnterpriseMetadata(),
}
err := s.watchUpstreamTarget(ctx, snap, opts)
@ -384,6 +408,7 @@ type targetWatchOpts struct {
service string
filter string
datacenter string
peer string
entMeta *acl.EnterpriseMeta
}
@ -397,11 +422,17 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
var finalMeta acl.EnterpriseMeta
finalMeta.Merge(opts.entMeta)
correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String()
uid := opts.upstreamID
correlationID := "upstream-target:" + opts.chainID + ":" + uid.String()
if opts.peer != "" {
uid = NewUpstreamIDFromTargetID(opts.chainID)
correlationID = upstreamPeerWatchIDPrefix + uid.String()
}
ctx, cancel := context.WithCancel(ctx)
err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
PeerName: opts.upstreamID.Peer,
PeerName: opts.peer,
Datacenter: opts.datacenter,
QueryOptions: structs.QueryOptions{
Token: s.token,
@ -422,6 +453,31 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
}
snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel
if uid.Peer == "" {
return nil
}
if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
}
// Check whether a watch for this peer exists to avoid duplicates.
if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
peerCtx, cancel := context.WithCancel(ctx)
if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
Request: &pbpeering.TrustBundleReadRequest{
Name: uid.Peer,
Partition: uid.PartitionOrDefault(),
},
QueryOptions: structs.QueryOptions{Token: s.token},
}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
cancel()
return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
}
snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
}
return nil
}
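
The `IsWatched` guard above is what keeps exactly one trust-bundle watch per peer, however many discovery-chain targets resolve to that peer. The idiom, reduced to a self-contained sketch (toy types, not this package's watch map):

```go
package main

import (
	"context"
	"fmt"
)

// watchMap is a toy stand-in for the snapshot's watch map: one cancelable
// watch per key, with IsWatched used to dedupe re-registration.
type watchMap struct {
	cancels map[string]context.CancelFunc
}

func (m *watchMap) IsWatched(key string) bool {
	_, ok := m.cancels[key]
	return ok
}

func (m *watchMap) InitWatch(key string, cancel context.CancelFunc) {
	m.cancels[key] = cancel
}

func main() {
	m := &watchMap{cancels: map[string]context.CancelFunc{}}
	for _, peer := range []string{"cluster-01", "cluster-01", "cluster-02"} {
		if m.IsWatched(peer) {
			continue // a watch for this peer already exists
		}
		_, cancel := context.WithCancel(context.Background())
		m.InitWatch(peer, cancel)
	}
	fmt.Println(len(m.cancels)) // 2: the duplicate peer is registered once
}
```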

View File

@ -8,7 +8,6 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/proto/pbpeerstream"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-multierror"
@ -27,6 +26,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
var (
@ -379,6 +379,7 @@ func (s *Server) Establish(
}
var id string
serverAddrs := tok.ServerAddresses
if existing == nil {
id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID)
if err != nil {
@ -386,6 +387,11 @@ func (s *Server) Establish(
}
} else {
id = existing.ID
// If there is a connected stream, assume that the existing ServerAddresses
// are up to date and do not try to overwrite them with the token's addresses.
if status, ok := s.Tracker.StreamStatus(id); ok && status.Connected {
serverAddrs = existing.PeerServerAddresses
}
}
// validate that this peer name is not being used as an acceptor already
@ -397,7 +403,7 @@ func (s *Server) Establish(
ID: id,
Name: req.PeerName,
PeerCAPems: tok.CA,
PeerServerAddresses: tok.ServerAddresses,
PeerServerAddresses: serverAddrs,
PeerServerName: tok.ServerName,
PeerID: tok.PeerID,
Meta: req.Meta,
@ -418,9 +424,9 @@ func (s *Server) Establish(
}
var exchangeResp *pbpeerstream.ExchangeSecretResponse
// Loop through the token's addresses once, attempting to fetch the long-lived stream secret.
// Loop through the known server addresses once, attempting to fetch the long-lived stream secret.
var dialErrors error
for _, addr := range peering.PeerServerAddresses {
for _, addr := range serverAddrs {
exchangeResp, err = exchangeSecret(ctx, addr, tlsOption, &exchangeReq)
if err != nil {
dialErrors = multierror.Append(dialErrors, fmt.Errorf("failed to exchange peering secret with %q: %w", addr, err))
@ -720,11 +726,12 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
return nil, err
}
if existing == nil || !existing.IsActive() {
if existing == nil || existing.State == pbpeering.PeeringState_DELETING {
// Return early when the Peering doesn't exist or is already marked for deletion.
// We don't return nil because the pb will fail to marshal.
return &pbpeering.PeeringDeleteResponse{}, nil
}
// We are using a write request due to needing to perform a deferred deletion.
// The peering gets marked for deletion by setting the DeletedAt field,
// and a leader routine will handle deleting the peering.

View File

@ -621,38 +621,50 @@ func TestPeeringService_Read_ACLEnforcement(t *testing.T) {
}
func TestPeeringService_Delete(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s := newTestServer(t, nil)
p := &pbpeering.Peering{
ID: testUUID(t),
Name: "foo",
State: pbpeering.PeeringState_ESTABLISHING,
PeerCAPems: nil,
PeerServerName: "test",
PeerServerAddresses: []string{"addr1"},
tt := map[string]pbpeering.PeeringState{
"active peering": pbpeering.PeeringState_ACTIVE,
"terminated peering": pbpeering.PeeringState_TERMINATED,
}
err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
require.NoError(t, err)
require.Nil(t, p.DeletedAt)
require.True(t, p.IsActive())
client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
for name, overrideState := range tt {
t.Run(name, func(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s := newTestServer(t, nil)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
// A pointer is kept for the following peering so that we can modify the object without another PeeringWrite.
p := &pbpeering.Peering{
ID: testUUID(t),
Name: "foo",
PeerCAPems: nil,
PeerServerName: "test",
PeerServerAddresses: []string{"addr1"},
}
err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
require.NoError(t, err)
require.Nil(t, p.DeletedAt)
require.True(t, p.IsActive())
_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
require.NoError(t, err)
// Overwrite the peering state to simulate deleting from a non-initial state.
p.State = overrideState
retry.Run(t, func(r *retry.R) {
_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
require.NoError(r, err)
client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
// Initially the peering will be marked for deletion but eventually the leader
// routine will clean it up.
require.Nil(r, resp)
})
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
require.NoError(r, err)
// Initially the peering will be marked for deletion but eventually the leader
// routine will clean it up.
require.Nil(r, resp)
})
})
}
}
func TestPeeringService_Delete_ACLEnforcement(t *testing.T) {

View File

@ -964,11 +964,18 @@ func (e *ServiceResolverConfigEntry) Validate() error {
// TODO(rb): prevent subsets and default subsets from being defined?
if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" {
if r.isEmpty() {
return fmt.Errorf("Redirect is empty")
}
if r.Service == "" {
switch {
case r.Peer != "" && r.ServiceSubset != "":
return fmt.Errorf("Redirect.Peer cannot be set with Redirect.ServiceSubset")
case r.Peer != "" && r.Partition != "":
return fmt.Errorf("Redirect.Partition cannot be set with Redirect.Peer")
case r.Peer != "" && r.Datacenter != "":
return fmt.Errorf("Redirect.Peer cannot be set with Redirect.Datacenter")
case r.Service == "":
if r.ServiceSubset != "" {
return fmt.Errorf("Redirect.ServiceSubset defined without Redirect.Service")
}
@ -978,9 +985,12 @@ func (e *ServiceResolverConfigEntry) Validate() error {
if r.Partition != "" {
return fmt.Errorf("Redirect.Partition defined without Redirect.Service")
}
} else if r.Service == e.Name {
if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) {
return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service)
if r.Peer != "" {
return fmt.Errorf("Redirect.Peer defined without Redirect.Service")
}
case r.ServiceSubset != "" && (r.Service == "" || r.Service == e.Name):
if !isSubset(r.ServiceSubset) {
return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, e.Name)
}
}
}
@ -1231,6 +1241,10 @@ type ServiceResolverRedirect struct {
// Datacenter is the datacenter to resolve the service from instead of the
// current one (optional).
Datacenter string `json:",omitempty"`
// Peer is the name of the cluster peer to resolve the service from instead
// of the current one (optional).
Peer string `json:",omitempty"`
}
func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
@ -1240,9 +1254,14 @@ func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
Namespace: r.Namespace,
Partition: r.Partition,
Datacenter: r.Datacenter,
Peer: r.Peer,
}
}
func (r *ServiceResolverRedirect) isEmpty() bool {
return r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" && r.Peer == ""
}
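
For illustration, the minimal valid redirect-to-peer shape (taken from the validation tests later in this diff) pairs Peer with Service and nothing else:

```go
entry := &ServiceResolverConfigEntry{
	Kind: ServiceResolver,
	Name: "test",
	Redirect: &ServiceResolverRedirect{
		Service: "other",      // required alongside Peer
		Peer:    "cluster-01", // cannot be combined with ServiceSubset, Partition, or Datacenter
	},
}
```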
// There are some restrictions on what is allowed in here:
//
// - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be

View File

@ -72,6 +72,28 @@ func TestServiceResolverConfigEntry_OSS(t *testing.T) {
},
validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
},
{
name: "setting redirect Namespace on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Namespace: "ns1",
},
},
validateErr: `Redirect: Setting Namespace requires Consul Enterprise`,
},
{
name: "setting redirect Partition on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Partition: "ap1",
},
},
validateErr: `Redirect: Setting Partition requires Consul Enterprise`,
},
}
// Bulk add a bunch of similar validation cases.

View File

@ -655,6 +655,41 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
validateErr: `Redirect.ServiceSubset "gone" is not a valid subset of "test"`,
},
{
name: "redirect with peer and subset",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Peer: "cluster-01",
ServiceSubset: "gone",
},
},
validateErr: `Redirect.Peer cannot be set with Redirect.ServiceSubset`,
},
{
name: "redirect with peer and datacenter",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Peer: "cluster-01",
Datacenter: "dc2",
},
},
validateErr: `Redirect.Peer cannot be set with Redirect.Datacenter`,
},
{
name: "redirect with peer but no service",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Peer: "cluster-01",
},
},
validateErr: `Redirect.Peer defined without Redirect.Service`,
},
{
name: "self redirect with valid subset",
entry: &ServiceResolverConfigEntry{
@ -669,6 +704,17 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
},
},
{
name: "redirect to peer",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Service: "other",
Peer: "cluster-01",
},
},
},
{
name: "simple wildcard failover",
entry: &ServiceResolverConfigEntry{

View File

@ -53,6 +53,28 @@ func TestNodeServiceWithName(t testing.T, name string) *NodeService {
}
}
const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul"
func TestNodeServiceWithNameInPeer(t testing.T, name string, peer string) *NodeService {
service := "payments"
return &NodeService{
Kind: ServiceKindTypical,
Service: name,
Port: 8080,
Connect: ServiceConnect{
PeerMeta: &PeeringServiceMeta{
SNI: []string{
service + ".default.default." + peer + ".external." + peerTrustDomain,
},
SpiffeID: []string{
"spiffe://" + peerTrustDomain + "/ns/default/dc/" + peer + "-dc/svc/" + service,
},
Protocol: "tcp",
},
},
}
}
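
The SNI and SPIFFE ID strings above follow a fixed shape; a self-contained sketch that prints the composed SNI for this helper's hard-coded `payments` service (values copied from the function above):

```go
package main

import "fmt"

func main() {
	const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul"
	service, peer := "payments", "cluster-01"
	// <service>.<namespace>.<partition>.<peer>.external.<peer trust domain>
	fmt.Println(service + ".default.default." + peer + ".external." + peerTrustDomain)
	// Output: payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul
}
```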
// TestNodeServiceProxy returns a *NodeService representing a valid
// Connect proxy.
func TestNodeServiceProxy(t testing.T) *NodeService {

View File

@ -88,29 +88,26 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
clusters = append(clusters, passthroughs...)
}
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
// so that the sets of endpoints generated match the sets of clusters.
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
explicit := upstream.HasLocalPortOrSocket()
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
if !implicit && !explicit {
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
continue
}
return upstream, !implicit && !explicit
}
chainEndpoints, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid]
if !ok {
// this should not happen
return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
// so that the sets of endpoints generated match the sets of clusters.
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
upstream, skip := getUpstream(uid)
if skip {
continue
}
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
uid,
upstream,
chain,
chainEndpoints,
cfgSnap,
false,
)
@ -127,18 +124,15 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
// upstream in endpoints.go so that the sets of endpoints generated match
// the sets of clusters.
for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
explicit := upstreamCfg.HasLocalPortOrSocket()
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
if !implicit && !explicit {
// Not associated with a known explicit or implicit upstream so it is skipped.
upstream, skip := getUpstream(uid)
if skip {
continue
}
peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap)
upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, cfg, peerMeta, cfgSnap)
if err != nil {
return nil, err
}
@ -652,17 +646,10 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
}
chainEndpoints, ok := cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid]
if !ok {
// this should not happen
return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
}
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
uid,
&u,
chain,
chainEndpoints,
cfgSnap,
false,
)
@ -745,7 +732,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam
func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
uid proxycfg.UpstreamID,
upstream *structs.Upstream,
upstreamConfig structs.UpstreamConfig,
peerMeta structs.PeeringServiceMeta,
cfgSnap *proxycfg.ConfigSnapshot,
) (*envoy_cluster_v3.Cluster, error) {
@ -754,16 +741,21 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
err error
)
cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
if cfg.EnvoyClusterJSON != "" {
c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if upstreamConfig.EnvoyClusterJSON != "" {
c, err = makeClusterFromUserConfig(upstreamConfig.EnvoyClusterJSON)
if err != nil {
return c, err
}
// In the happy path don't return yet as we need to inject TLS config still.
}
tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
if err != nil {
return c, err
}
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
if !ok {
// this should never happen since we loop through upstreams with
// set trust bundles
@ -772,22 +764,29 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
clusterName := generatePeeredClusterName(uid, tbs)
outlierDetection := ToOutlierDetection(upstreamConfig.PassiveHealthCheck)
// We can't rely on health checks for services on cluster peers because they
// don't take into account service resolvers, splitters and routers. Setting
// MaxEjectionPercent to 100% gives outlier detection the power to eject the
// entire cluster.
outlierDetection.MaxEjectionPercent = &wrappers.UInt32Value{Value: 100}
s.Logger.Trace("generating cluster for", "cluster", clusterName)
if c == nil {
c = &envoy_cluster_v3.Cluster{
Name: clusterName,
ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
ConnectTimeout: durationpb.New(time.Duration(upstreamConfig.ConnectTimeoutMs) * time.Millisecond),
CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
HealthyPanicThreshold: &envoy_type_v3.Percent{
Value: 0, // disable panic threshold
},
},
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
Thresholds: makeThresholdsIfNeeded(cfg.Limits),
Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
},
OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
OutlierDetection: outlierDetection,
}
if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
if err := s.setHttp2ProtocolOptions(c); err != nil {
return c, err
}
@ -821,12 +820,11 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
false, /*onlyPassing*/
)
}
}
rootPEMs := cfgSnap.RootPEMs()
if uid.Peer != "" {
tbs, _ := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
tbs, _ := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
rootPEMs = tbs.ConcatenatedRootPEMs()
}
@ -961,7 +959,6 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
uid proxycfg.UpstreamID,
upstream *structs.Upstream,
chain *structs.CompiledDiscoveryChain,
chainEndpoints map[string]structs.CheckServiceNodes,
cfgSnap *proxycfg.ConfigSnapshot,
forMeshGateway bool,
) ([]*envoy_cluster_v3.Cluster, error) {
@ -978,7 +975,15 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
upstreamConfigMap = upstream.Config
}
cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
// Mesh gateways are exempt because upstreamsSnapshot is only used for
// cluster peering targets and transitive failover/redirects are unsupported.
if err != nil && !forMeshGateway {
return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
}
rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
@ -986,13 +991,28 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
"error", err)
}
finalizeUpstreamConfig := func(cfg structs.UpstreamConfig, connectTimeout time.Duration) structs.UpstreamConfig {
if cfg.Protocol == "" {
cfg.Protocol = chain.Protocol
}
if cfg.Protocol == "" {
cfg.Protocol = "tcp"
}
if cfg.ConnectTimeoutMs == 0 {
cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
}
return cfg
}
var escapeHatchCluster *envoy_cluster_v3.Cluster
if !forMeshGateway {
if cfg.EnvoyClusterJSON != "" {
if rawUpstreamConfig.EnvoyClusterJSON != "" {
if chain.Default {
// If you haven't done anything to setup the discovery chain, then
// you can use the envoy_cluster_json escape hatch.
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
escapeHatchCluster, err = makeClusterFromUserConfig(rawUpstreamConfig.EnvoyClusterJSON)
if err != nil {
return nil, err
}
@ -1006,14 +1026,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
var out []*envoy_cluster_v3.Cluster
for _, node := range chain.Nodes {
if node.Type != structs.DiscoveryGraphNodeTypeResolver {
switch {
case node == nil:
return nil, fmt.Errorf("impossible to process a nil node")
case node.Type != structs.DiscoveryGraphNodeTypeResolver:
continue
case node.Resolver == nil:
return nil, fmt.Errorf("impossible to process a non-resolver node")
}
failover := node.Resolver.Failover
// These variables are prefixed with primary to avoid shadowing bugs.
primaryTargetID := node.Resolver.Target
primaryTarget := chain.Targets[primaryTargetID]
primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, node.Resolver.ConnectTimeout)
if forMeshGateway {
primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
}
@ -1026,22 +1052,38 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
continue
}
type targetClusterOptions struct {
type targetClusterOption struct {
targetID string
clusterName string
}
// Construct the information required to make target clusters. When
// failover is configured, create the aggregate cluster.
var targetClustersOptions []targetClusterOptions
var targetClustersOptions []targetClusterOption
if failover != nil && !forMeshGateway {
var failoverClusterNames []string
for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
target := chain.Targets[tid]
clusterName := CustomizeClusterName(target.Name, chain)
clusterName := target.Name
targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
if targetUID.Peer != "" {
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
// We can't generate a cluster for a peer target without its trust bundle.
// The trust bundle should be ready soon.
if !ok {
s.Logger.Debug("peer trust bundle not ready for discovery chain target",
"peer", targetUID.Peer,
"target", tid,
)
continue
}
clusterName = generatePeeredClusterName(targetUID, tbs)
}
clusterName = CustomizeClusterName(clusterName, chain)
clusterName = failoverClusterNamePrefix + clusterName
targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
targetClustersOptions = append(targetClustersOptions, targetClusterOption{
targetID: tid,
clusterName: clusterName,
})
@ -1070,7 +1112,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
out = append(out, c)
} else {
targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
targetClustersOptions = append(targetClustersOptions, targetClusterOption{
targetID: primaryTargetID,
clusterName: primaryClusterName,
})
@ -1089,11 +1131,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
Datacenter: target.Datacenter,
Service: target.Service,
}.URI().String()
if uid.Peer != "" {
return nil, fmt.Errorf("impossible to get a peer discovery chain")
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
s.Logger.Debug("generating cluster for", "cluster", targetInfo.clusterName)
if targetUID.Peer != "" {
peerMeta := upstreamsSnapshot.UpstreamPeerMeta(targetUID)
upstreamCluster, err := s.makeUpstreamClusterForPeerService(targetUID, upstreamConfig, peerMeta, cfgSnap)
if err != nil {
continue
}
// Override the cluster name to include the failover-target~ prefix.
upstreamCluster.Name = targetInfo.clusterName
out = append(out, upstreamCluster)
continue
}
s.Logger.Trace("generating cluster for", "cluster", targetInfo.clusterName)
c := &envoy_cluster_v3.Cluster{
Name: targetInfo.clusterName,
AltStatName: targetInfo.clusterName,
@ -1114,9 +1165,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
},
// TODO(peering): make circuit breakers or outlier detection work?
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
Thresholds: makeThresholdsIfNeeded(cfg.Limits),
Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
},
OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
OutlierDetection: ToOutlierDetection(upstreamConfig.PassiveHealthCheck),
}
var lb *structs.LoadBalancer
@ -1127,19 +1178,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", targetInfo.clusterName, err)
}
var proto string
if !forMeshGateway {
proto = cfg.Protocol
}
if proto == "" {
proto = chain.Protocol
}
if proto == "" {
proto = "tcp"
}
if proto == "http2" || proto == "grpc" {
if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
if err := s.setHttp2ProtocolOptions(c); err != nil {
return nil, err
}
@ -1148,7 +1187,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
configureTLS := true
if forMeshGateway {
// We only initiate TLS if we're doing an L7 proxy.
configureTLS = structs.IsProtocolHTTPLike(proto)
configureTLS = structs.IsProtocolHTTPLike(upstreamConfig.Protocol)
}
if configureTLS {
@ -1221,7 +1260,6 @@ func (s *ResourceGenerator) makeExportedUpstreamClustersForMeshGateway(cfgSnap *
proxycfg.NewUpstreamIDFromServiceName(svc),
nil,
chain,
nil,
cfgSnap,
true,
)
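
To make the peered failover naming in makeUpstreamClustersForDiscoveryChain above concrete, here is a sketch of the composed member-cluster name for a peer target (the resulting shape is illustrative, not authoritative; the `failover-target~` prefix is the one mentioned in the comments above):

```go
package main

import "fmt"

func main() {
	const failoverClusterNamePrefix = "failover-target~" // prefix referenced in the comments above
	// peeredName stands in for generatePeeredClusterName(targetUID, tbs);
	// the exact shape is illustrative only.
	peeredName := "db.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
	fmt.Println(failoverClusterNamePrefix + peeredName)
}
```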

View File

@ -257,6 +257,12 @@ func TestClustersFromSnapshot(t *testing.T) {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
},
},
{
name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@ -495,6 +501,13 @@ func TestClustersFromSnapshot(t *testing.T) {
"failover", nil, nil, nil)
},
},
{
name: "ingress-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-to-cluster-peer", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {

View File

@ -50,14 +50,19 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+
len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints))
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
// so that the sets of endpoints generated match the sets of clusters.
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
explicit := upstream.HasLocalPortOrSocket()
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
if !implicit && !explicit {
return upstream, !implicit && !explicit
}
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
// so that the sets of endpoints generated match the sets of clusters.
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
upstream, skip := getUpstream(uid)
if skip {
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
continue
}
@ -70,6 +75,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
es, err := s.endpointsFromDiscoveryChain(
uid,
chain,
cfgSnap,
cfgSnap.Locality,
upstreamConfigMap,
cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
@ -86,12 +92,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
// upstream in clusters.go so that the sets of endpoints generated match
// the sets of clusters.
for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
explicit := upstreamCfg.HasLocalPortOrSocket()
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
if !implicit && !explicit {
// Not associated with a known explicit or implicit upstream so it is skipped.
_, skip := getUpstream(uid)
if skip {
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
continue
}
@ -104,22 +107,14 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
clusterName := generatePeeredClusterName(uid, tbs)
// Also skip peer instances with a hostname as their address. EDS
// cannot resolve hostnames, so we provide them through CDS instead.
if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
continue
loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
if err != nil {
return nil, err
}
endpoints, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid)
if ok {
la := makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
proxycfg.GatewayKey{ /*empty so it never matches*/ },
)
resources = append(resources, la)
if loadAssignment != nil {
resources = append(resources, loadAssignment)
}
}
@ -375,6 +370,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycf
es, err := s.endpointsFromDiscoveryChain(
uid,
cfgSnap.IngressGateway.DiscoveryChain[uid],
cfgSnap,
proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
u.Config,
cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
@ -412,9 +408,38 @@ func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
}
}
func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
var la *envoy_endpoint_v3.ClusterLoadAssignment
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
if err != nil {
return la, err
}
// Also skip peer instances with a hostname as their address. EDS
// cannot resolve hostnames, so we provide them through CDS instead.
if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
return la, nil
}
endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
if !ok {
return nil, nil
}
la = makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
proxycfg.GatewayKey{ /*empty so it never matches*/ },
)
return la, nil
}
func (s *ResourceGenerator) endpointsFromDiscoveryChain(
uid proxycfg.UpstreamID,
chain *structs.CompiledDiscoveryChain,
cfgSnap *proxycfg.ConfigSnapshot,
gatewayKey proxycfg.GatewayKey,
upstreamConfigMap map[string]interface{},
upstreamEndpoints map[string]structs.CheckServiceNodes,
@ -432,6 +457,14 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
upstreamConfigMap = make(map[string]interface{}) // TODO: needed?
}
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
// Mesh gateways are exempt because upstreamsSnapshot is only used for
// cluster peering targets and transitive failover/redirects are unsupported.
if err != nil && !forMeshGateway {
return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
}
var resources []proto.Message
var escapeHatchCluster *envoy_cluster_v3.Cluster
@ -465,8 +498,15 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
if node.Type != structs.DiscoveryGraphNodeTypeResolver {
continue
}
primaryTargetID := node.Resolver.Target
failover := node.Resolver.Failover
type targetLoadAssignmentOption struct {
targetID string
clusterName string
}
var targetLoadAssignmentOptions []targetLoadAssignmentOption
var numFailoverTargets int
if failover != nil {
numFailoverTargets = len(failover.Targets)
@ -474,66 +514,84 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
clusterNamePrefix := ""
if numFailoverTargets > 0 && !forMeshGateway {
clusterNamePrefix = failoverClusterNamePrefix
for _, failTargetID := range failover.Targets {
target := chain.Targets[failTargetID]
endpointGroup, valid := makeLoadAssignmentEndpointGroup(
chain.Targets,
upstreamEndpoints,
gatewayEndpoints,
failTargetID,
gatewayKey,
forMeshGateway,
)
if !valid {
continue // skip the failover target if we're still populating the snapshot
}
for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) {
target := chain.Targets[targetID]
clusterName := target.Name
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
// We can't generate a cluster for a peer without its trust bundle. The
// trust bundle should be ready soon.
if !ok {
s.Logger.Debug("peer trust bundle not ready for discovery chain target",
"peer", targetUID.Peer,
"target", targetID,
)
continue
}
clusterName := CustomizeClusterName(target.Name, chain)
clusterName = generatePeeredClusterName(targetUID, tbs)
}
clusterName = CustomizeClusterName(clusterName, chain)
clusterName = failoverClusterNamePrefix + clusterName
if escapeHatchCluster != nil {
clusterName = escapeHatchCluster.Name
}
s.Logger.Debug("generating endpoints for", "cluster", clusterName)
la := makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
resources = append(resources, la)
targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
targetID: targetID,
clusterName: clusterName,
})
}
}
targetID := node.Resolver.Target
target := chain.Targets[targetID]
clusterName := CustomizeClusterName(target.Name, chain)
clusterName = clusterNamePrefix + clusterName
if escapeHatchCluster != nil {
clusterName = escapeHatchCluster.Name
}
if forMeshGateway {
clusterName = meshGatewayExportedClusterNamePrefix + clusterName
}
s.Logger.Debug("generating endpoints for", "cluster", clusterName)
endpointGroup, valid := makeLoadAssignmentEndpointGroup(
chain.Targets,
upstreamEndpoints,
gatewayEndpoints,
targetID,
gatewayKey,
forMeshGateway,
)
if !valid {
continue // skip the cluster if we're still populating the snapshot
} else {
target := chain.Targets[primaryTargetID]
clusterName := CustomizeClusterName(target.Name, chain)
clusterName = clusterNamePrefix + clusterName
if escapeHatchCluster != nil {
clusterName = escapeHatchCluster.Name
}
if forMeshGateway {
clusterName = meshGatewayExportedClusterNamePrefix + clusterName
}
targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
targetID: primaryTargetID,
clusterName: clusterName,
})
}
la := makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
resources = append(resources, la)
for _, targetInfo := range targetLoadAssignmentOptions {
s.Logger.Debug("generating endpoints for", "cluster", targetInfo.clusterName)
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
if targetUID.Peer != "" {
loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetInfo.clusterName, targetUID)
if err != nil {
return nil, err
}
if loadAssignment != nil {
resources = append(resources, loadAssignment)
}
continue
}
endpointGroup, valid := makeLoadAssignmentEndpointGroup(
chain.Targets,
upstreamEndpoints,
gatewayEndpoints,
targetInfo.targetID,
gatewayKey,
forMeshGateway,
)
if !valid {
continue // skip the cluster if we're still populating the snapshot
}
la := makeLoadAssignment(
targetInfo.clusterName,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
resources = append(resources, la)
}
}
return resources, nil
@ -586,6 +644,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap
clusterEndpoints, err := s.endpointsFromDiscoveryChain(
proxycfg.NewUpstreamIDFromServiceName(svc),
chain,
cfgSnap,
cfgSnap.Locality,
nil,
chainEndpoints,
@ -640,11 +699,12 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
healthStatus = endpointGroup.OverrideHealth
}
endpoint := &envoy_endpoint_v3.Endpoint{
Address: makeAddress(addr, port),
}
es = append(es, &envoy_endpoint_v3.LbEndpoint{
HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
Endpoint: &envoy_endpoint_v3.Endpoint{
Address: makeAddress(addr, port),
},
Endpoint: endpoint,
},
HealthStatus: healthStatus,
LoadBalancingWeight: makeUint32Value(weight),
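
For reference, the full shape makeLoadAssignment emits per target is small: one locality group, one LbEndpoint per instance, weight 1, and an explicit health status. A self-contained sketch, assuming go-control-plane v3 types (the helper name and inputs are illustrative):

```go
package sketch

import (
	envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// simpleLoadAssignment (hypothetical) mirrors the CLA shape in the golden
// files below: one locality group, weight-1 HEALTHY endpoints.
func simpleLoadAssignment(clusterName string, addrs []string, port uint32) *envoy_endpoint_v3.ClusterLoadAssignment {
	var lbEndpoints []*envoy_endpoint_v3.LbEndpoint
	for _, addr := range addrs {
		lbEndpoints = append(lbEndpoints, &envoy_endpoint_v3.LbEndpoint{
			HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
				Endpoint: &envoy_endpoint_v3.Endpoint{
					Address: &envoy_core_v3.Address{
						Address: &envoy_core_v3.Address_SocketAddress{
							SocketAddress: &envoy_core_v3.SocketAddress{
								Address:       addr,
								PortSpecifier: &envoy_core_v3.SocketAddress_PortValue{PortValue: port},
							},
						},
					},
				},
			},
			HealthStatus:        envoy_core_v3.HealthStatus_HEALTHY,
			LoadBalancingWeight: wrapperspb.UInt32(1),
		})
	}
	return &envoy_endpoint_v3.ClusterLoadAssignment{
		ClusterName: clusterName,
		Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{
			{LbEndpoints: lbEndpoints},
		},
	}
}
```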

View File

@ -284,6 +284,12 @@ func TestEndpointsFromSnapshot(t *testing.T) {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
},
},
{
name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@ -396,6 +402,13 @@ func TestEndpointsFromSnapshot(t *testing.T) {
"failover", nil, nil, nil)
},
},
{
name: "ingress-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-to-cluster-peer", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {

View File

@ -0,0 +1,219 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"clusterType": {
"name": "envoy.clusters.aggregate",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig",
"clusters": [
"failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"failover-target~db.default.cluster-01.external.peer1.domain"
]
}
},
"connectTimeout": "33s",
"lbPolicy": "CLUSTER_PROVIDED"
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.cluster-01.external.peer1.domain",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "1s",
"circuitBreakers": {
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "peer1-root-1\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments"
}
]
}
},
"sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "33s",
"circuitBreakers": {
},
"outlierDetection": {
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
}
]
}
},
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "5s",
"circuitBreakers": {
},
"outlierDetection": {
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target"
},
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target"
}
]
}
},
"sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "local_app",
"type": "STATIC",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
}
],
"typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"nonce": "00000001"
}
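
The cluster names in this golden file follow a legible pattern: failover targets carry a `failover-target~` prefix, and peered targets are addressed as `<service>.<namespace>.<peer>.external.<peer trust domain>`. A hypothetical helper reproducing the names seen above; the format string is inferred from the golden output, not taken from Consul's source:

```go
package sketch

import "fmt"

// peeredFailoverClusterName (hypothetical) reproduces names like
// "failover-target~db.default.cluster-01.external.peer1.domain".
func peeredFailoverClusterName(service, namespace, peer, trustDomain string) string {
	return fmt.Sprintf("failover-target~%s.%s.%s.external.%s", service, namespace, peer, trustDomain)
}
```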

View File

@ -58,7 +58,7 @@
"dnsRefreshRate": "10s",
"dnsLookupFamily": "V4_ONLY",
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
@ -115,7 +115,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {

View File

@ -0,0 +1,139 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"clusterType": {
"name": "envoy.clusters.aggregate",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig",
"clusters": [
"failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"failover-target~db.default.cluster-01.external.peer1.domain"
]
}
},
"connectTimeout": "33s",
"lbPolicy": "CLUSTER_PROVIDED"
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.cluster-01.external.peer1.domain",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "33s",
"circuitBreakers": {
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "peer1-root-1\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments"
}
]
}
},
"sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "33s",
"circuitBreakers": {
},
"outlierDetection": {
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
}
]
}
},
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
}
],
"typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"nonce": "00000001"
}

View File

@ -18,7 +18,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
@ -75,7 +75,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
@ -157,7 +157,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {

View File

@ -0,0 +1,109 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.cluster-01.external.peer1.domain",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.20.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"nonce": "00000001"
}

View File

@ -0,0 +1,75 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.cluster-01.external.peer1.domain",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"nonce": "00000001"
}

View File

@ -219,6 +219,7 @@ type ServiceResolverRedirect struct {
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
Datacenter string `json:",omitempty"`
Peer string `json:",omitempty"`
}
type ServiceResolverFailover struct {
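
With the new `Peer` field, a service-resolver can redirect straight to a service imported from a cluster peer. A minimal sketch of writing such an entry through the api package, assuming a reachable local agent; the service and peer names are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entry := &api.ServiceResolverConfigEntry{
		Kind: api.ServiceResolver,
		Name: "db",
		Redirect: &api.ServiceResolverRedirect{
			Service: "db",
			Peer:    "cluster-01", // redirect to the peer's exported "db"
		},
	}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```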

View File

@ -193,6 +193,20 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
},
verify: verifyResolver,
},
{
name: "redirect to peer",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test-redirect",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Redirect: &ServiceResolverRedirect{
Service: "test-failover",
Peer: "cluster-01",
},
},
verify: verifyResolver,
},
{
name: "mega splitter", // use one mega object to avoid multiple trips
entry: &ServiceSplitterConfigEntry{

View File

@ -17,6 +17,9 @@ type QueryFailoverOptions struct {
Targets []QueryFailoverTarget
}
// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions
type QueryFailoverTarget struct {
// PeerName specifies a peer to try during failover.
PeerName string
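
The alias above is what un-breaks callers of the earlier rename: a Go type alias (`=`) makes the old identifier the very same type as the new one, not a distinct named type. A self-contained sketch of the pattern; the struct fields and surrounding code are illustrative:

```go
package main

import "fmt"

type QueryFailoverOptions struct {
	NearestN    int
	Datacenters []string
}

// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions

// use accepts the new type; alias values pass straight through.
func use(o QueryFailoverOptions) { fmt.Println(o.NearestN) }

func main() {
	old := QueryDatacenterOptions{NearestN: 3} // old name still compiles
	use(old)                                   // identical type, no conversion
}
```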

View File

@ -0,0 +1,5 @@
ARG CONSUL_IMAGE_VERSION=latest
FROM consul:${CONSUL_IMAGE_VERSION}
RUN apk update && apk add iptables
ARG TARGETARCH
COPY linux_${TARGETARCH}/consul /bin/consul

View File

@ -689,6 +689,7 @@ func ServiceResolverRedirectToStructs(s *ServiceResolverRedirect, t *structs.Ser
t.Namespace = s.Namespace
t.Partition = s.Partition
t.Datacenter = s.Datacenter
t.Peer = s.Peer
}
func ServiceResolverRedirectFromStructs(t *structs.ServiceResolverRedirect, s *ServiceResolverRedirect) {
if s == nil {
@ -699,6 +700,7 @@ func ServiceResolverRedirectFromStructs(t *structs.ServiceResolverRedirect, s *S
s.Namespace = t.Namespace
s.Partition = t.Partition
s.Datacenter = t.Datacenter
s.Peer = t.Peer
}
func ServiceResolverSubsetToStructs(s *ServiceResolverSubset, t *structs.ServiceResolverSubset) {
if s == nil {

View File

@ -796,6 +796,7 @@ type ServiceResolverRedirect struct {
Namespace string `protobuf:"bytes,3,opt,name=Namespace,proto3" json:"Namespace,omitempty"`
Partition string `protobuf:"bytes,4,opt,name=Partition,proto3" json:"Partition,omitempty"`
Datacenter string `protobuf:"bytes,5,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"`
Peer string `protobuf:"bytes,6,opt,name=Peer,proto3" json:"Peer,omitempty"`
}
func (x *ServiceResolverRedirect) Reset() {
@ -865,6 +866,13 @@ func (x *ServiceResolverRedirect) GetDatacenter() string {
return ""
}
func (x *ServiceResolverRedirect) GetPeer() string {
if x != nil {
return x.Peer
}
return ""
}
// mog annotation:
//
// target=github.com/hashicorp/consul/agent/structs.ServiceResolverFailover
@ -2521,7 +2529,7 @@ var file_proto_pbconfigentry_config_entry_proto_rawDesc = []byte{
0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x4f, 0x6e,
0x6c, 0x79, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
0x0b, 0x4f, 0x6e, 0x6c, 0x79, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x22, 0xb5, 0x01, 0x0a,
0x0b, 0x4f, 0x6e, 0x6c, 0x79, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x22, 0xc9, 0x01, 0x0a,
0x17, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72,
0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69,
@ -2533,341 +2541,342 @@ var file_proto_pbconfigentry_config_entry_proto_rawDesc = []byte{
0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72,
0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74,
0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20,
0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x73,
0x12, 0x5e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x44, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f,
0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65,
0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73,
0x22, 0xcf, 0x01, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f,
0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67,
0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73,
0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e,
0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12,
0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65,
0x65, 0x72, 0x22, 0xc7, 0x02, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e,
0x63, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5d, 0x0a, 0x0e, 0x52,
0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e,
0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x52, 0x69, 0x6e, 0x67,
0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x52, 0x69, 0x6e, 0x67,
0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x12, 0x4c, 0x65,
0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f,
0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4c,
0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x52, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x55, 0x0a, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c,
0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x68, 0x61,
0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69,
0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e,
0x74, 0x72, 0x79, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0c,
0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0e,
0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28,
0x0a, 0x0f, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x4d, 0x61, 0x78, 0x69,
0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x04, 0x52, 0x0f, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69,
0x7a, 0x65, 0x22, 0x36, 0x0a, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x68, 0x6f, 0x69,
0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x43,
0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x0a, 0x48,
0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x65,
0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12,
0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
0x57, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c,
0x6f, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x24,
0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75,
0x62, 0x73, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18,
0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x6f,
0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x43, 0x6f, 0x6f, 0x6b,
0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x49, 0x50, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x53, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x49, 0x50, 0x12, 0x1a, 0x0a, 0x08, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c,
0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c,
0x22, 0x69, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x08, 0x52, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54,
0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x22, 0xbf, 0x02, 0x0a, 0x0e,
0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x49,
0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x68, 0x61,
0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69,
0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e,
0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x54, 0x0a, 0x09, 0x4c, 0x69, 0x73,
0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x68,
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e,
0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65,
0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74,
0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12,
0x53, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74,
0x65, 0x77, 0x61, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04,
0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xea, 0x01,
0x0a, 0x10, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x08, 0x52, 0x07, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x03,
0x53, 0x44, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68,
0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69,
0x6c, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 0x54, 0x61, 0x72,
0x67, 0x65, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72,
0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x22, 0xc7, 0x02, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42,
0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
0x5d, 0x0a, 0x0e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e,
0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e,
0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69,
0x0a, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74,
0x72, 0x79, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x55, 0x0a, 0x0c, 0x48, 0x61, 0x73,
0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73,
0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69,
0x63, 0x79, 0x52, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73,
0x22, 0x64, 0x0a, 0x0e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e,
0x67, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x4d, 0x69, 0x6e,
0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x28, 0x0a, 0x0f,
0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69,
0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x36, 0x0a, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b,
0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd3,
0x01, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a,
0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69,
0x65, 0x6c, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72,
0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x53, 0x44, 0x53, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c,
0x53, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x61, 0x78, 0x56,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72,
0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x69,
0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x47, 0x61,
0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e,
0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x49, 0x6e, 0x67, 0x72,
0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x50,
0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12,
0x1a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x51, 0x0a, 0x08, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e,
0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x49,
0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x68, 0x61,
0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69,
0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e,
0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x43, 0x6f,
	// Generated raw descriptor bytes for proto/pbconfigentry (the
	// config_entry.proto file descriptor) omitted: the diff's before and
	// after byte arrays were interleaved in extraction and are not
	// meaningfully readable here. The regenerated descriptor reflects this
	// commit's config entry changes, e.g. the new TLSMinVersion,
	// TLSMaxVersion, and CipherSuites fields on GatewayTLSConfig.
}
var (

View File

@ -122,6 +122,7 @@ message ServiceResolverRedirect {
string Namespace = 3;
string Partition = 4;
string Datacenter = 5;
string Peer = 6;
}
// mog annotation:

View File

@ -143,10 +143,10 @@ func PeeringStateFromAPI(t api.PeeringState) PeeringState {
}
func (p *Peering) IsActive() bool {
if p != nil && p.State == PeeringState_TERMINATED {
if p == nil || p.State == PeeringState_TERMINATED {
return false
}
if p == nil || p.DeletedAt == nil {
if p.DeletedAt == nil {
return true
}

View File

@ -1,7 +1,7 @@
# Note this arg has to be before the first FROM
ARG ENVOY_VERSION
FROM consul-dev as consul
FROM consul:local as consul
FROM docker.mirror.hashicorp.services/envoyproxy/envoy:v${ENVOY_VERSION}
COPY --from=consul /bin/consul /bin/consul

View File

@ -0,0 +1,5 @@
primary_datacenter = "alpha"
log_level = "trace"
peering {
enabled = true
}

View File

@ -0,0 +1,26 @@
config_entries {
bootstrap = [
{
kind = "proxy-defaults"
name = "global"
config {
protocol = "tcp"
}
},
{
kind = "exported-services"
name = "default"
services = [
{
name = "s2"
consumers = [
{
peer_name = "alpha-to-primary"
}
]
}
]
}
]
}

View File

@ -0,0 +1,5 @@
services {
name = "mesh-gateway"
kind = "mesh-gateway"
port = 4432
}

View File

@ -0,0 +1 @@
# We don't want an s1 service in this peer

View File

@ -0,0 +1,7 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {}
}
}

View File

@ -0,0 +1,11 @@
#!/bin/bash
set -euo pipefail
register_services alpha
gen_envoy_bootstrap s2 19002 alpha
gen_envoy_bootstrap mesh-gateway 19003 alpha true
wait_for_config_entry proxy-defaults global alpha
wait_for_config_entry exported-services default alpha

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bats
load helpers
@test "s2 proxy is running correct version" {
assert_envoy_version 19002
}
@test "s2 proxy admin is up on :19002" {
retry_default curl -f -s localhost:19002/stats -o /dev/null
}
@test "gateway-alpha proxy admin is up on :19003" {
retry_default curl -f -s localhost:19003/stats -o /dev/null
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s2 alpha
}
@test "s2 proxy should be healthy" {
assert_service_has_healthy_instances s2 1 alpha
}
@test "gateway-alpha should be up and listening" {
retry_long nc -z consul-alpha-client:4432
}

View File

@ -0,0 +1,2 @@
bind_addr = "0.0.0.0"
advertise_addr = "{{ GetInterfaceIP \"eth0\" }}"

View File

@ -0,0 +1,6 @@
#!/bin/bash
snapshot_envoy_admin localhost:19000 s1 primary || true
snapshot_envoy_admin localhost:19001 s2 primary || true
snapshot_envoy_admin localhost:19002 s2 alpha || true
snapshot_envoy_admin localhost:19003 mesh-gateway alpha || true

View File

@ -0,0 +1,3 @@
peering {
enabled = true
}

View File

@ -0,0 +1,21 @@
config_entries {
bootstrap {
kind = "proxy-defaults"
name = "global"
config {
protocol = "tcp"
}
}
bootstrap {
kind = "service-resolver"
name = "s2"
failover = {
"*" = {
targets = [{peer = "primary-to-alpha"}]
}
}
}
}
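
This resolver entry drives the new peering failover: when the local `s2` instances are unhealthy, the catch-all `"*"` subset fails over to the targets imported from the `primary-to-alpha` peering. For reference, a hedged sketch of writing the same entry from Go, assuming the `api` package's failover target type with its `Peer` field that ships alongside this feature:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of the HCL bootstrap entry above: for any subset ("*") of
	// s2, fail over to the instances exported by the primary-to-alpha peer.
	entry := &api.ServiceResolverConfigEntry{
		Kind: api.ServiceResolver,
		Name: "s2",
		Failover: map[string]api.ServiceResolverFailover{
			"*": {
				Targets: []api.ServiceResolverFailoverTarget{
					{Peer: "primary-to-alpha"},
				},
			},
		},
	}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```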

View File

@ -0,0 +1,16 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
}
]
}
}
}
}

View File

@ -0,0 +1,7 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {}
}
}

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -euo pipefail
register_services primary
gen_envoy_bootstrap s1 19000 primary
gen_envoy_bootstrap s2 19001 primary
wait_for_config_entry proxy-defaults global

View File

@ -0,0 +1,87 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy is running correct version" {
assert_envoy_version 19000
}
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "gateway-primary proxy admin is up on :19001" {
retry_default curl localhost:19000/config_dump
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxies should be healthy in primary" {
assert_service_has_healthy_instances s2 1 primary
}
@test "s2 proxies should be healthy in alpha" {
assert_service_has_healthy_instances s2 1 alpha
}
@test "gateway-alpha should be up and listening" {
retry_long nc -z consul-alpha-client:4432
}
@test "peer the two clusters together" {
create_peering primary alpha
}
@test "s2 alpha proxies should be healthy in primary" {
assert_service_has_healthy_instances s2 1 primary "" "" primary-to-alpha
}
@test "s1 upstream should have healthy endpoints for s2 in both primary and failover" {
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary.internal HEALTHY 1
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary-to-alpha.external HEALTHY 1
}
@test "s1 upstream should be able to connect to s2" {
run retry_default curl -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}
@test "s1 upstream made 1 connection" {
assert_envoy_metric_at_least 127.0.0.1:19000 "cluster.failover-target~s2.default.primary.internal.*cx_total" 1
}
@test "terminate instance of s2 primary envoy which should trigger failover to s2 alpha when the tcp check fails" {
kill_envoy s2 primary
}
@test "s2 proxies should be unhealthy in primary" {
assert_service_has_healthy_instances s2 0 primary
}
@test "s1 upstream should have healthy endpoints for s2 in the failover cluster peer" {
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary.internal UNHEALTHY 1
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary-to-alpha.external HEALTHY 1
}
@test "reset envoy statistics" {
reset_envoy_metrics 127.0.0.1:19000
}
@test "s1 upstream should be able to connect to s2 in the failover cluster peer" {
run retry_default curl -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}
@test "s1 upstream made 1 connection to s2 through the cluster peer" {
assert_envoy_metric_at_least 127.0.0.1:19000 "cluster.failover-target~s2.default.primary-to-alpha.external.*cx_total" 1
}

View File

@ -0,0 +1,4 @@
#!/bin/bash
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy s2-alpha s2-sidecar-proxy-alpha gateway-alpha tcpdump-primary tcpdump-alpha"
export REQUIRE_PEERS=1

View File

@ -17,7 +17,7 @@ consul tls cert create -dc=secondary -server -node=sec
"
docker rm -f "$container" &>/dev/null || true
docker run -i --net=none --name="$container" consul-dev:latest sh -c "${scriptlet}"
docker run -i --net=none --name="$container" consul:local sh -c "${scriptlet}"
# primary
for f in \

View File

@ -562,14 +562,14 @@ function assert_intention_denied {
function docker_consul {
local DC=$1
shift 1
docker run -i --rm --network container:envoy_consul-${DC}_1 consul-dev "$@"
docker run -i --rm --network container:envoy_consul-${DC}_1 consul:local "$@"
}
function docker_consul_for_proxy_bootstrap {
local DC=$1
shift 1
docker run -i --rm --network container:envoy_consul-${DC}_1 consul-dev "$@"
docker run -i --rm --network container:envoy_consul-${DC}_1 consul:local "$@" 2> /dev/null
}
function docker_wget {
@ -581,7 +581,7 @@ function docker_wget {
function docker_curl {
local DC=$1
shift 1
docker run --rm --network container:envoy_consul-${DC}_1 --entrypoint curl consul-dev "$@"
docker run --rm --network container:envoy_consul-${DC}_1 --entrypoint curl consul:local "$@"
}
function docker_exec {
@ -806,9 +806,16 @@ function delete_config_entry {
function register_services {
local DC=${1:-primary}
wait_for_leader "$DC"
docker_consul_exec ${DC} sh -c "consul services register /workdir/${DC}/register/service_*.hcl"
}
# wait_for_leader waits until a leader is elected.
# Its first argument must be the datacenter name.
function wait_for_leader {
retry_default docker_consul_exec "$1" sh -c '[[ $(curl --fail -sS http://127.0.0.1:8500/v1/status/leader) ]]'
}
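
`wait_for_leader` polls `/v1/status/leader` until it reports a non-empty address. A hedged Go equivalent using the `api` client's `Status().Leader()` call, with an illustrative timeout and poll interval:

```go
package helpers

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

// WaitForLeader mirrors the bash helper: poll the leader endpoint until a
// leader address (e.g. "127.0.0.1:8300") is reported or the deadline passes.
func WaitForLeader(client *api.Client, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		leader, err := client.Status().Leader()
		if err == nil && leader != "" {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("no leader elected within %s (last error: %v)", timeout, err)
		}
		time.Sleep(500 * time.Millisecond)
	}
}
```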
function setup_upsert_l4_intention {
local SOURCE=$1
local DESTINATION=$2

View File

@ -16,6 +16,8 @@ ENVOY_VERSION=${ENVOY_VERSION:-"1.23.0"}
export ENVOY_VERSION
export DOCKER_BUILDKIT=1
# Always run tests on amd64 because that's what the CI environment uses.
export DOCKER_DEFAULT_PLATFORM="linux/amd64"
if [ ! -z "$DEBUG" ] ; then
set -x
@ -44,17 +46,19 @@ function network_snippet {
}
function aws_snippet {
local snippet=""
if [[ ! -z "$LAMBDA_TESTS_ENABLED" ]]; then
local snippet=""
# The Lambda integration cases assume that a Lambda function exists in $AWS_REGION with an ARN of $AWS_LAMBDA_ARN.
# The AWS credentials must have permission to invoke the Lambda function.
[ -n "$(set | grep '^AWS_ACCESS_KEY_ID=')" ] && snippet="${snippet} -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
[ -n "$(set | grep '^AWS_SECRET_ACCESS_KEY=')" ] && snippet="${snippet} -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
[ -n "$(set | grep '^AWS_SESSION_TOKEN=')" ] && snippet="${snippet} -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN"
[ -n "$(set | grep '^AWS_LAMBDA_REGION=')" ] && snippet="${snippet} -e AWS_LAMBDA_REGION=$AWS_LAMBDA_REGION"
[ -n "$(set | grep '^AWS_LAMBDA_ARN=')" ] && snippet="${snippet} -e AWS_LAMBDA_ARN=$AWS_LAMBDA_ARN"
# The Lambda integration cases assume that a Lambda function exists in $AWS_REGION with an ARN of $AWS_LAMBDA_ARN.
# The AWS credentials must have permission to invoke the Lambda function.
[ -n "$(set | grep '^AWS_ACCESS_KEY_ID=')" ] && snippet="${snippet} -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
[ -n "$(set | grep '^AWS_SECRET_ACCESS_KEY=')" ] && snippet="${snippet} -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
[ -n "$(set | grep '^AWS_SESSION_TOKEN=')" ] && snippet="${snippet} -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN"
[ -n "$(set | grep '^AWS_LAMBDA_REGION=')" ] && snippet="${snippet} -e AWS_LAMBDA_REGION=$AWS_LAMBDA_REGION"
[ -n "$(set | grep '^AWS_LAMBDA_ARN=')" ] && snippet="${snippet} -e AWS_LAMBDA_ARN=$AWS_LAMBDA_ARN"
echo "$snippet"
echo "$snippet"
fi
}
function init_workdir {
@ -222,7 +226,7 @@ function start_consul {
--hostname "consul-${DC}-server" \
--network-alias "consul-${DC}-server" \
-e "CONSUL_LICENSE=$license" \
consul-dev \
consul:local \
agent -dev -datacenter "${DC}" \
-config-dir "/workdir/${DC}/consul" \
-config-dir "/workdir/${DC}/consul-server" \
@ -237,7 +241,7 @@ function start_consul {
--network-alias "consul-${DC}-client" \
-e "CONSUL_LICENSE=$license" \
${ports[@]} \
consul-dev \
consul:local \
agent -datacenter "${DC}" \
-config-dir "/workdir/${DC}/consul" \
-data-dir "/tmp/consul" \
@ -256,7 +260,7 @@ function start_consul {
--network-alias "consul-${DC}-server" \
-e "CONSUL_LICENSE=$license" \
${ports[@]} \
consul-dev \
consul:local \
agent -dev -datacenter "${DC}" \
-config-dir "/workdir/${DC}/consul" \
-config-dir "/workdir/${DC}/consul-server" \
@ -290,7 +294,7 @@ function start_partitioned_client {
--hostname "consul-${PARTITION}-client" \
--network-alias "consul-${PARTITION}-client" \
-e "CONSUL_LICENSE=$license" \
consul-dev agent \
consul:local agent \
-datacenter "primary" \
-retry-join "consul-primary-server" \
-grpc-port 8502 \

View File

@ -410,13 +410,64 @@ The corresponding CLI command is [`consul catalog services`](/commands/catalog/s
- `dc` `(string: "")` - Specifies the datacenter to query. This will default to
the datacenter of the agent being queried.
- `node-meta` `(string: "")` - Specifies a desired node metadata key/value pair
- `node-meta` `(string: "")` **Deprecated** - Use `filter` with the `NodeMeta` selector instead.
This parameter will be removed in a future version of Consul.
Specifies a desired node metadata key/value pair
of the form `key:value`. This parameter can be specified multiple times, and
filters the results to nodes with the specified key/value pairs.
- `ns` `(string: "")` <EnterpriseAlert inline /> - Specifies the namespace of the services you lookup.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
- `filter` `(string: "")` - Specifies the expression used to filter the
query results prior to returning the data.
### Filtering
The filter will be executed against each Service mapping within the catalog.
The following selectors and filter operations are supported:
| Selector | Supported Operations |
| ---------------------------------------------------- | -------------------------------------------------- |
| `Address` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `Datacenter` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ID` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `Node` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `NodeMeta.<any>` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `NodeMeta` | Is Empty, Is Not Empty, In, Not In |
| `ServiceAddress` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceConnect.Native` | Equal, Not Equal |
| `ServiceEnableTagOverride` | Equal, Not Equal |
| `ServiceID` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceKind` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceMeta.<any>` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceMeta` | Is Empty, Is Not Empty, In, Not In |
| `ServiceName` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServicePort` | Equal, Not Equal |
| `ServiceProxy.DestinationServiceID` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.DestinationServiceName` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.LocalServiceAddress` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.LocalServicePort` | Equal, Not Equal |
| `ServiceProxy.Mode` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.TransparentProxy.OutboundListenerPort` | Equal, Not Equal |
| `ServiceProxy.MeshGateway.Mode` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.Datacenter` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.DestinationName` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.DestinationNamespace` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.DestinationType` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.LocalBindAddress` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.LocalBindPort` | Equal, Not Equal |
| `ServiceProxy.Upstreams.MeshGateway.Mode` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams` | Is Empty, Is Not Empty |
| `ServiceTaggedAddresses.<any>.Address` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceTaggedAddresses.<any>.Port` | Equal, Not Equal |
| `ServiceTaggedAddresses` | Is Empty, Is Not Empty, In, Not In |
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
| `ServiceWeights.Passing` | Equal, Not Equal |
| `ServiceWeights.Warning` | Equal, Not Equal |
| `TaggedAddresses.<any>` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `TaggedAddresses` | Is Empty, Is Not Empty, In, Not In |
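
These expressions are also exposed through the Go `api` client via `QueryOptions.Filter`. A short sketch; the filter string and datacenter below are illustrative values, not part of this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List only connect-native services whose node carries env=prod metadata.
	services, _, err := client.Catalog().Services(&api.QueryOptions{
		Datacenter: "dc1",
		Filter:     `ServiceConnect.Native == true and NodeMeta.env == "prod"`,
	})
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags)
	}
}
```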
### Sample Request
```shell-session

View File

@ -75,9 +75,9 @@ This endpoint restores a point-in-time snapshot of the Consul server state.
Restores involve a potentially dangerous low-level Raft operation that is not
designed to handle server failures during a restore. This operation is primarily
intended to be used when recovering from a disaster, restoring into a fresh
cluster of Consul servers running the same version as the cluster from where the
snapshot was taken.
intended to recover from a disaster. It restores your configuration into a fresh
cluster of Consul servers as long as your new cluster runs the same Consul
version as the cluster that originally took the snapshot.
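
For reference, a restore can also be driven from the Go `api` client; this hedged sketch streams a snapshot file saved earlier (the file name is illustrative) into the fresh cluster:

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Open a snapshot produced earlier by `consul snapshot save` or the
	// snapshot agent, then restore it into the current cluster.
	f, err := os.Open("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := client.Snapshot().Restore(nil, f); err != nil {
		log.Fatal(err)
	}
}
```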
| Method | Path | Produces |
| :----- | :---------- | ----------------------------- |

View File

@ -168,7 +168,8 @@ Usage: `consul snapshot agent [options]`
"s3_bucket": "",
"s3_key_prefix": "consul-snapshot",
"s3_server_side_encryption": false,
"s3_static_snapshot_name": ""
"s3_static_snapshot_name": "",
"s3_force_path_style": false
},
"azure_blob_storage": {
"account_name": "",
@ -275,6 +276,10 @@ Note that despite the AWS references, any S3-compatible endpoint can be specifie
- `-aws-s3-static-snapshot-name` - If this is given, all snapshots are saved with the same file name. The agent will not rotate or version snapshots, and will save them with the same name each time.
Use this if you want to rely on [S3's versioning capabilities](http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) instead of the agent handling it for you.
- `-aws-s3-force-path-style` - Enables the use of legacy path-based addressing instead of virtual addressing. This flag is required by MinIO
and other third-party S3-compatible object storage platforms where DNS or TLS requirements for virtual addressing are prohibitive.
For more information, refer to the AWS documentation on [Methods for accessing a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html). A usage sketch follows this list.
- `-aws-s3-enable-kms` - Enables using [Amazon KMS](https://aws.amazon.com/kms/) for encrypting snapshots.
- `-aws-s3-kms-key` - Optional Amazon KMS key to use, if this is not set the default KMS master key will be used. Set this if you want to manage key rotation yourself.
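The following sketch shows path-style addressing in context; the bucket name and MinIO endpoint are placeholders.

```shell-session
$ consul snapshot agent \
    -aws-s3-bucket=consul-backups \
    -aws-s3-region=us-east-1 \
    -aws-s3-endpoint=http://minio.example.internal:9000 \
    -aws-s3-force-path-style=true
```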

View File

@ -16,9 +16,9 @@ from the given file.
Restores involve a potentially dangerous low-level Raft operation that is not
designed to handle server failures during a restore. This command is primarily
intended to be used when recovering from a disaster, restoring into a fresh
cluster of Consul servers running the same version as the cluster from where the
snapshot was taken.
intended to recover from a disaster. It restores your configuration into a fresh
cluster of Consul servers as long as your new cluster runs the same Consul
version as the cluster that originally took the snapshot.
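For example, to restore from a local snapshot file (the file name is illustrative):

```shell-session
$ consul snapshot restore backup.snap
Restored snapshot
```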
The table below shows this command's [required ACLs](/api#authentication). Configuration of
[blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching)

View File

@ -349,59 +349,59 @@ populated free list structure.
This is a full list of metrics emitted by Consul.
| Metric | Description | Unit | Type |
| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------- |
| `consul.acl.blocked.{check,service}.deregistration` | Increments whenever a deregistration fails for an entity (check or service) is blocked by an ACL. | requests | counter |
| `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration fails for an entity (check, node or service) is blocked by an ACL. | requests | counter |
| `consul.api.http` | This samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names, for these an underscore will be present as a placeholder (eg. path=`v1.kv._`) | ms | timer |
| `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter |
| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter |
| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter |
| `consul.client.api.catalog_register.` | Increments whenever a Consul agent receives a catalog register request. | requests | counter |
| `consul.client.api.success.catalog_register.` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter |
| `consul.client.rpc.error.catalog_register.` | Increments whenever a Consul agent receives an RPC error for a catalog register request. | errors | counter |
| `consul.client.api.catalog_deregister.` | Increments whenever a Consul agent receives a catalog deregister request. | requests | counter |
| `consul.client.api.success.catalog_deregister.` | Increments whenever a Consul agent successfully responds to a catalog deregister request. | requests | counter |
| `consul.client.rpc.error.catalog_deregister.` | Increments whenever a Consul agent receives an RPC error for a catalog deregister request. | errors | counter |
| `consul.client.api.catalog_datacenters.` | Increments whenever a Consul agent receives a request to list datacenters in the catalog. | requests | counter |
| `consul.client.api.success.catalog_datacenters.` | Increments whenever a Consul agent successfully responds to a request to list datacenters. | requests | counter |
| `consul.client.rpc.error.catalog_datacenters.` | Increments whenever a Consul agent receives an RPC error for a request to list datacenters. | errors | counter |
| `consul.client.api.catalog_nodes.` | Increments whenever a Consul agent receives a request to list nodes from the catalog. | requests | counter |
| `consul.client.api.success.catalog_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes. | requests | counter |
| `consul.client.rpc.error.catalog_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes. | errors | counter |
| `consul.client.api.catalog_services.` | Increments whenever a Consul agent receives a request to list services from the catalog. | requests | counter |
| `consul.client.api.success.catalog_services.` | Increments whenever a Consul agent successfully responds to a request to list services. | requests | counter |
| `consul.client.rpc.error.catalog_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services. | errors | counter |
| `consul.client.api.catalog_service_nodes.` | Increments whenever a Consul agent receives a request to list nodes offering a service. | requests | counter |
| `consul.client.api.success.catalog_service_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. | requests | counter |
| `consul.client.api.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service. | requests | counter |
| `consul.client.rpc.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.   | errors | counter |
| `consul.client.api.catalog_node_services.` | Increments whenever a Consul agent receives a request to list services registered in a node.   | requests | counter |
| `consul.client.api.success.catalog_node_services.` | Increments whenever a Consul agent successfully responds to a request to list services in a node.   | requests | counter |
| `consul.client.rpc.error.catalog_node_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services in a node.   | errors | counter |
| `consul.client.api.catalog_node_service_list` | Increments whenever a Consul agent receives a request to list a node's registered services. | requests | counter |
| `consul.client.rpc.error.catalog_node_service_list` | Increments whenever a Consul agent receives an RPC error for request to list a node's registered services. | errors | counter |
| `consul.client.api.success.catalog_node_service_list` | Increments whenever a Consul agent successfully responds to a request to list a node's registered services. | requests | counter |
| `consul.client.api.catalog_gateway_services.` | Increments whenever a Consul agent receives a request to list services associated with a gateway. | requests | counter |
| `consul.client.api.success.catalog_gateway_services.` | Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. | requests | counter |
| `consul.client.rpc.error.catalog_gateway_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. | errors | counter |
| `consul.runtime.num_goroutines` | Tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. | number of goroutines | gauge |
| `consul.runtime.alloc_bytes` | Measures the number of bytes allocated by the Consul process. This may burst from time to time but should return to a steady state value. | bytes | gauge |
| `consul.runtime.heap_objects` | Measures the number of objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. | number of objects | gauge |
| `consul.state.nodes` | Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
| `consul.state.peerings` | Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. | number of objects | gauge |
| `consul.state.services` | Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
| `consul.state.service_instances` | Measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
| `consul.state.kv_entries` | Measures the current number of unique KV entries written in Consul. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge |
| `consul.state.connect_instances` | Measures the current number of unique connect service instances registered with Consul labeled by Kind (e.g. connect-proxy, connect-native, etc). Added in v1.10.4 | number of objects | gauge |
| `consul.state.config_entries` | Measures the current number of configuration entries registered with Consul labeled by Kind (e.g. service-defaults, proxy-defaults, etc). See [Configuration Entries](/docs/connect/config-entries) for more information. Added in v1.10.4 | number of objects | gauge |
| `consul.members.clients` | Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of clients | gauge |
| `consul.members.servers` | Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of servers | gauge |
| `consul.dns.stale_queries` | Increments when an agent serves a query within the allowed stale threshold. | queries | counter |
| `consul.dns.ptr_query.` | Measures the time spent handling a reverse DNS query for the given node. | ms | timer |
| `consul.dns.domain_query.` | Measures the time spent handling a domain query for the given node. | ms | timer |
| `consul.system.licenseExpiration` | <EnterpriseAlert inline /> This measures the number of hours remaining on the agents license. | hours | gauge |
| `consul.version` | Represents the Consul version. | agents | gauge |
| Metric | Description | Unit | Type |
|--------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|---------|
| `consul.acl.blocked.{check,service}.deregistration` | Increments whenever a deregistration request for an entity (check or service) is blocked by an ACL. | requests | counter |
| `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration request for an entity (check, node, or service) is blocked by an ACL. | requests | counter |
| `consul.api.http` | Samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names; for these, an underscore is used as a placeholder (e.g., path=`v1.kv._`). | ms | timer |
| `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter |
| `consul.client.rpc.exceeded` | Increments whenever an RPC request from a Consul agent in client mode is rate limited by that agent's [`limits`](/docs/agent/config/config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter |
| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter |
| `consul.client.api.catalog_register.` | Increments whenever a Consul agent receives a catalog register request. | requests | counter |
| `consul.client.api.success.catalog_register.` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter |
| `consul.client.rpc.error.catalog_register.` | Increments whenever a Consul agent receives an RPC error for a catalog register request. | errors | counter |
| `consul.client.api.catalog_deregister.` | Increments whenever a Consul agent receives a catalog deregister request. | requests | counter |
| `consul.client.api.success.catalog_deregister.` | Increments whenever a Consul agent successfully responds to a catalog deregister request. | requests | counter |
| `consul.client.rpc.error.catalog_deregister.` | Increments whenever a Consul agent receives an RPC error for a catalog deregister request. | errors | counter |
| `consul.client.api.catalog_datacenters.` | Increments whenever a Consul agent receives a request to list datacenters in the catalog. | requests | counter |
| `consul.client.api.success.catalog_datacenters.` | Increments whenever a Consul agent successfully responds to a request to list datacenters. | requests | counter |
| `consul.client.rpc.error.catalog_datacenters.` | Increments whenever a Consul agent receives an RPC error for a request to list datacenters. | errors | counter |
| `consul.client.api.catalog_nodes.` | Increments whenever a Consul agent receives a request to list nodes from the catalog. | requests | counter |
| `consul.client.api.success.catalog_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes. | requests | counter |
| `consul.client.rpc.error.catalog_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes. | errors | counter |
| `consul.client.api.catalog_services.` | Increments whenever a Consul agent receives a request to list services from the catalog. | requests | counter |
| `consul.client.api.success.catalog_services.` | Increments whenever a Consul agent successfully responds to a request to list services. | requests | counter |
| `consul.client.rpc.error.catalog_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services. | errors | counter |
| `consul.client.api.catalog_service_nodes.` | Increments whenever a Consul agent receives a request to list nodes offering a service. | requests | counter |
| `consul.client.api.success.catalog_service_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. | requests | counter |
| `consul.client.api.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service. | requests | counter |
| `consul.client.rpc.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service. | errors | counter |
| `consul.client.api.catalog_node_services.` | Increments whenever a Consul agent receives a request to list services registered in a node. | requests | counter |
| `consul.client.api.success.catalog_node_services.` | Increments whenever a Consul agent successfully responds to a request to list services in a node. | requests | counter |
| `consul.client.rpc.error.catalog_node_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services in a node. | errors | counter |
| `consul.client.api.catalog_node_service_list` | Increments whenever a Consul agent receives a request to list a node's registered services. | requests | counter |
| `consul.client.rpc.error.catalog_node_service_list` | Increments whenever a Consul agent receives an RPC error for a request to list a node's registered services. | errors | counter |
| `consul.client.api.success.catalog_node_service_list` | Increments whenever a Consul agent successfully responds to a request to list a node's registered services. | requests | counter |
| `consul.client.api.catalog_gateway_services.` | Increments whenever a Consul agent receives a request to list services associated with a gateway. | requests | counter |
| `consul.client.api.success.catalog_gateway_services.` | Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. | requests | counter |
| `consul.client.rpc.error.catalog_gateway_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. | errors | counter |
| `consul.runtime.num_goroutines` | Tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. | number of goroutines | gauge |
| `consul.runtime.alloc_bytes` | Measures the number of bytes allocated by the Consul process. This may burst from time to time but should return to a steady state value. | bytes | gauge |
| `consul.runtime.heap_objects` | Measures the number of objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. | number of objects | gauge |
| `consul.state.nodes` | Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
| `consul.state.peerings` | Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. | number of objects | gauge |
| `consul.state.services` | Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
| `consul.state.service_instances` | Measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
| `consul.state.kv_entries` | Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge |
| `consul.state.connect_instances` | Measures the current number of unique connect service instances registered with Consul labeled by Kind (e.g. connect-proxy, connect-native, etc). Added in v1.10.4 | number of objects | gauge |
| `consul.state.config_entries` | Measures the current number of configuration entries registered with Consul labeled by Kind (e.g. service-defaults, proxy-defaults, etc). See [Configuration Entries](/docs/connect/config-entries) for more information. Added in v1.10.4 | number of objects | gauge |
| `consul.members.clients` | Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of clients | gauge |
| `consul.members.servers` | Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of servers | gauge |
| `consul.dns.stale_queries` | Increments when an agent serves a query within the allowed stale threshold. | queries | counter |
| `consul.dns.ptr_query.` | Measures the time spent handling a reverse DNS query for the given node. | ms | timer |
| `consul.dns.domain_query.` | Measures the time spent handling a domain query for the given node. | ms | timer |
| `consul.system.licenseExpiration` | <EnterpriseAlert inline /> This measures the number of hours remaining on the agent's license. | hours | gauge |
| `consul.version` | Represents the Consul version. | agents | gauge |
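To spot-check these metrics on a running agent, you can query the agent metrics endpoint; this sketch assumes a local agent on the default HTTP port.

```shell-session
$ curl http://127.0.0.1:8500/v1/agent/metrics
```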
## Server Health
@ -695,14 +695,14 @@ agent. The table below describes the additional metrics exported by the proxy.
**Requirements:**
- Consul 1.13.0+
[Cluster peering](/docs/connect/cluster-peering) refers to enabling communication between Consul clusters through a peer connection, as opposed to a federated connection. Consul collects metrics that describe the number of services exported to a peered cluster. Peering metrics are only emitted by the leader server.
[Cluster peering](/docs/connect/cluster-peering) refers to enabling communication between Consul clusters through a peer connection, as opposed to a federated connection. Consul collects metrics that describe the number of services exported to a peered cluster. Peering metrics are only emitted by the leader server.
| Metric | Description | Unit | Type |
| ------------------------------------- | ----------------------------------------------------------------------| ------ | ------- |
| `consul.peering.exported_services` | Counts the number of services exported to a peer cluster. | count | gauge |
### Labels
Consul attaches the following labels to metric values.
Consul attaches the following labels to metric values.
| Label Name | Description | Possible values |
| ------------------------------------- | ---------------------------------------------------------------------- | ------------------------------------------ |
| `peer_name` | The name of the peering on the reporting cluster or leader. | Any defined peer name in the cluster |

View File

@ -108,7 +108,7 @@ First, create a configuration entry and specify the `Kind` as `"exported-service
```hcl
Kind = "exported-services"
Name = "default"
Services = [
{
## The name and namespace of the service to export.
@ -120,10 +120,11 @@ Services = [
{
## The peer name to reference in config is the one set
## during the peering process.
Peer = "cluster-02"
PeerName = "cluster-02"
}
}
]
}
]
```
</CodeBlockConfig>
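After saving the configuration entry to a file, apply it with the Consul CLI; the file name here is illustrative.

```shell-session
$ consul config write exported-services.hcl
```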

View File

@ -25,50 +25,82 @@ You must implement the following requirements to create and use cluster peering
- At least two Kubernetes clusters
- The installation must be running Consul on Kubernetes version 0.47.1 or later
### Helm chart configuration
### Prepare for install
To establish cluster peering through Kubernetes, deploy clusters with the following Helm values.
1. After provisioning a Kubernetes cluster and setting up your kubeconfig file to manage access to multiple Kubernetes clusters, export the Kubernetes context names for future use with `kubectl`. For more information on how to use kubeconfig and contexts, refer to [Configure access to multiple clusters](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) on the Kubernetes documentation website.
<CodeBlockConfig filename="values.yaml">
You can use the following methods to get the context names for your clusters:
* Issue the `kubectl config current-context` command to get the context for the cluster you are currently in.
* Issue the `kubectl config get-contexts` command to get all configured contexts in your kubeconfig file.
```shell-session
$ export CLUSTER1_CONTEXT=<CONTEXT for first Kubernetes cluster>
$ export CLUSTER2_CONTEXT=<CONTEXT for second Kubernetes cluster>
```
```yaml
global:
image: "hashicorp/consul:1.13.1"
peering:
1. To establish cluster peering through Kubernetes, create a `values.yaml` file with the following Helm values.
With these values, the servers in each cluster are exposed over a Kubernetes load balancer service. This service can be customized using [`server.exposeService`](/docs/k8s/helm#v-server-exposeservice).
When generating a peering token from one of the clusters, Consul uses the address(es) of the load balancer in the peering token so that the peering stream goes through the load balancer in front of the servers. For customizing the addresses used in the peering token, refer to [`global.peering.tokenGeneration`](/docs/k8s/helm#v-global-peering-tokengeneration).
<CodeBlockConfig filename="values.yaml">
```yaml
global:
image: "hashicorp/consul:1.13.1"
peering:
enabled: true
connectInject:
enabled: true
connectInject:
enabled: true
controller:
enabled: true
meshGateway:
enabled: true
replicas: 1
```
dns:
enabled: true
enableRedirection: true
server:
exposeService:
enabled: true
controller:
enabled: true
meshGateway:
enabled: true
replicas: 1
```
</CodeBlockConfig>
</CodeBlockConfig>
### Install Consul on Kubernetes
Install Consul on Kubernetes on each Kubernetes cluster by applying `values.yaml` using the Helm CLI. With these values,
the servers in each cluster will be exposed over a Kubernetes Load balancer service. This service can be customized
using [`server.exposeService`](/docs/k8s/helm#v-server-exposeservice). When generating a peering token from one of the
clusters, the address(es) of the load balancer will be used in the peering token, so the peering stream will go through
the load balancer in front of the servers. For customizing the addresses used in the peering token, see
[`global.peering.tokenGeneration`](/docs/k8s/helm#v-global-peering-tokengeneration).
1. Install Consul on Kubernetes on each Kubernetes cluster by applying `values.yaml` using the Helm CLI.
1. Install Consul on Kubernetes on `cluster-01`
```shell-session
$ export HELM_RELEASE_NAME=cluster-01
```
```shell-session
$ export HELM_RELEASE_NAME=cluster-name
```
```shell-session
$ helm install ${HELM_RELEASE_NAME} hashicorp/consul --create-namespace --namespace consul --version "0.47.1" --values values.yaml --kube-context $CLUSTER1_CONTEXT
```
1. Install Consul on Kubernetes on `cluster-02`
```shell-session
$ export HELM_RELEASE_NAME=cluster-02
```
```shell-session
$ helm install ${HELM_RELEASE_NAME} hashicorp/consul --version "0.47.1" --values values.yaml
```
```shell-session
$ helm install ${HELM_RELEASE_NAME} hashicorp/consul --create-namespace --namespace consul --version "0.47.1" --values values.yaml --kube-context $CLUSTER2_CONTEXT
```
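Optionally, confirm that the Consul pods are running in each cluster before proceeding; the namespace matches the `--namespace consul` flag used above.

```shell-session
$ kubectl --context $CLUSTER1_CONTEXT get pods --namespace consul
$ kubectl --context $CLUSTER2_CONTEXT get pods --namespace consul
```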
## Create a peering token
To peer Kubernetes clusters running Consul, you need to create a peering token and share it with the other cluster.
To peer Kubernetes clusters running Consul, you need to create a peering token and share it with the other cluster. As part of the peering process, the peer names for each respective cluster within the peering are established by using the `metadata.name` values for the `PeeringAcceptor` and `PeeringDialer` CRDs.
1. In `cluster-01`, create the `PeeringAcceptor` custom resource.
<CodeBlockConfig filename="acceptor.yml">
<CodeBlockConfig filename="acceptor.yaml">
```yaml
apiVersion: consul.hashicorp.com/v1alpha1
@ -88,13 +120,13 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
1. Apply the `PeeringAcceptor` resource to the first cluster.
```shell-session
$ kubectl apply --filename acceptor.yml
$ kubectl --context $CLUSTER1_CONTEXT apply --filename acceptor.yaml
```
1. Save your peering token so that you can export it to the other cluster.
```shell-session
$ kubectl get secret peering-token --output yaml > peering-token.yml
$ kubectl --context $CLUSTER1_CONTEXT get secret peering-token --output yaml > peering-token.yaml
```
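If the `peering-token` secret does not appear, you can inspect the acceptor resource for status conditions; `peeringacceptors` is the plural resource name Kubernetes derives from the `PeeringAcceptor` CRD.

```shell-session
$ kubectl --context $CLUSTER1_CONTEXT get peeringacceptors
```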
## Establish a peering connection between clusters
@ -102,12 +134,12 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
1. Apply the peering token to the second cluster.
```shell-session
$ kubectl apply --filename peering-token.yml
$ kubectl --context $CLUSTER2_CONTEXT apply --filename peering-token.yaml
```
1. In `cluster-02`, create the `PeeringDialer` custom resource.
1. In `cluster-02`, create the `PeeringDialer` custom resource.
<CodeBlockConfig filename="dialer.yml">
<CodeBlockConfig filename="dialer.yaml">
```yaml
apiVersion: consul.hashicorp.com/v1alpha1
@ -127,27 +159,74 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
1. Apply the `PeeringDialer` resource to the second cluster.
```shell-session
$ kubectl apply --filename dialer.yml
$ kubectl --context $CLUSTER2_CONTEXT apply --filename dialer.yaml
```
## Export services between clusters
1. For the service in "cluster-02" that you want to export, add the following [annotation](/docs/k8s/annotations-and-labels) to your service's pods.
<CodeBlockConfig filename="backend-service.yml">
<CodeBlockConfig filename="backend-service.yaml">
```yaml
##…
annotations:
"consul.hashicorp.com/connect-inject": "true"
##…
# Service to expose backend
apiVersion: v1
kind: Service
metadata:
name: backend-service
spec:
selector:
app: backend
ports:
- name: http
protocol: TCP
port: 80
targetPort: 9090
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backend
---
# deployment for backend
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend
labels:
app: backend
spec:
replicas: 1
selector:
matchLabels:
app: backend
template:
metadata:
labels:
app: backend
annotations:
"consul.hashicorp.com/connect-inject": "true"
spec:
serviceAccountName: backend
containers:
- name: backend
image: nicholasjackson/fake-service:v0.22.4
ports:
- containerPort: 9090
env:
- name: "LISTEN_ADDR"
value: "0.0.0.0:9090"
- name: "NAME"
value: "backend"
- name: "MESSAGE"
value: "Response from backend"
```
</CodeBlockConfig>
1. In `cluster-02`, create an `ExportedServices` custom resource.
<CodeBlockConfig filename="exportedsvc.yml">
<CodeBlockConfig filename="exportedsvc.yaml">
```yaml
apiVersion: consul.hashicorp.com/v1alpha1
@ -166,7 +245,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
1. Apply the service file and the `ExportedServices` resource for the second cluster.
```shell-session
$ kubectl apply --filename backend-service.yml --filename exportedsvc.yml
$ kubectl apply --context $CLUSTER2_CONTEXT --filename backend-service.yaml --filename exportedsvc.yaml
```
## Authorize services for peers
@ -195,18 +274,71 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
1. Apply the intentions to the second cluster.
```shell-session
$ kubectl apply --filename intention.yml
$ kubectl --context $CLUSTER2_CONTEXT apply --filename intention.yml
```
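For reference, a minimal `intention.yml` along these lines allows the frontend to reach the backend across the peering; this is a sketch that assumes the Consul 1.13 `ServiceIntentions` schema, in which a source may specify a `peer`.

```yaml
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceIntentions
metadata:
  name: backend-service
spec:
  destination:
    # The service being protected in cluster-02.
    name: backend-service
  sources:
    # Allow the frontend dialing in from the peered cluster.
    - name: frontend-service
      peer: cluster-01
      action: allow
```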
1. For the services in `cluster-01` that you want to access the "backend-service," add the following annotations to the service file.
1. For the services in `cluster-01` that you want to access the "backend-service," add the following annotations to the service file. To dial the upstream service from an application, ensure that the requests are sent to the correct DNS name as specified in [Service Virtual IP Lookups](/docs/discovery/dns#service-virtual-ip-lookups).
<CodeBlockConfig filename="frontend-service.yml">
<CodeBlockConfig filename="frontend-service.yaml">
```yaml
##…
annotations:
"consul.hashicorp.com/connect-inject": "true"
##…
# Service to expose frontend
apiVersion: v1
kind: Service
metadata:
name: frontend-service
spec:
selector:
app: frontend
ports:
- name: http
protocol: TCP
port: 9090
targetPort: 9090
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: frontend
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
labels:
app: frontend
spec:
replicas: 1
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
annotations:
"consul.hashicorp.com/connect-inject": "true"
spec:
serviceAccountName: frontend
containers:
- name: frontend
image: nicholasjackson/fake-service:v0.22.4
securityContext:
capabilities:
add: ["NET_ADMIN"]
ports:
- containerPort: 9090
env:
- name: "LISTEN_ADDR"
value: "0.0.0.0:9090"
- name: "UPSTREAM_URIS"
value: "http://backend-service.virtual.cluster-02.consul"
- name: "NAME"
value: "frontend"
- name: "MESSAGE"
value: "Hello World"
- name: "HTTP_CLIENT_KEEP_ALIVES"
value: "false"
```
</CodeBlockConfig>
@ -214,18 +346,45 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
1. Apply the service file to the first cluster.
```shell-session
$ kubectl apply --filename frontend-service.yml
$ kubectl --context $CLUSTER1_CONTEXT apply --filename frontend-service.yaml
```
1. Run the following command in `frontend-service` and check the output to confirm that you peered your clusters successfully.
```shell-session
$ kubectl exec -it $(kubectl get pod -l app=frontend -o name) -- curl localhost:1234
$ kubectl --context $CLUSTER1_CONTEXT exec -it $(kubectl --context $CLUSTER1_CONTEXT get pod -l app=frontend -o name) -- curl localhost:9090
{
"name": "backend-service",
##…
"body": "Response from backend",
"code": 200
"name": "frontend",
"uri": "/",
"type": "HTTP",
"ip_addresses": [
"10.16.2.11"
],
"start_time": "2022-08-26T23:40:01.167199",
"end_time": "2022-08-26T23:40:01.226951",
"duration": "59.752279ms",
"body": "Hello World",
"upstream_calls": {
"http://backend-service.virtual.cluster-02.consul": {
"name": "backend",
"uri": "http://backend-service.virtual.cluster-02.consul",
"type": "HTTP",
"ip_addresses": [
"10.32.2.10"
],
"start_time": "2022-08-26T23:40:01.223503",
"end_time": "2022-08-26T23:40:01.224653",
"duration": "1.149666ms",
"headers": {
"Content-Length": "266",
"Content-Type": "text/plain; charset=utf-8",
"Date": "Fri, 26 Aug 2022 23:40:01 GMT"
},
"body": "Response from backend",
"code": 200
}
},
"code": 200
}
```

View File

@ -302,7 +302,7 @@ spec:
name: 'weight',
type: 'float32: 0',
description:
'A value between 0 and 100 reflecting what portion of traffic should be directed to this split. The smallest representable eight is 1/10000 or .01%',
'A value between 0 and 100 reflecting what portion of traffic should be directed to this split. The smallest representable weight is 1/10000 or .01%',
},
{
name: 'Service',

View File

@ -96,6 +96,23 @@ pairs according to [RFC1464](https://www.ietf.org/rfc/rfc1464.txt).
Alternatively, the TXT record will only include the node's metadata value when the
node's metadata key starts with `rfc1035-`.
### Node Lookups for Consul Enterprise <EnterpriseAlert inline />
Consul nodes exist at the admin partition level within a datacenter.
By default, the partition and datacenter used in a [node lookup](#node-lookups) are
the partition and datacenter of the Consul agent that received the DNS query.
Use the following query format to specify a partition for a node lookup:
```text
<node>.node.<partition>.ap.<datacenter>.dc.<domain>
```
Consul server agents are in the `default` partition.
If DNS queries are addressed to Consul server agents,
node lookups to non-`default` partitions must explicitly specify
the partition of the target node.
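For example, with the default DNS port and placeholder names (`node1`, `part1`, `dc1`):

```shell-session
$ dig @127.0.0.1 -p 8600 node1.node.part1.ap.dc1.dc.consul
```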
## Service Lookups
A service lookup is used to query for service providers. Service queries support
@ -334,6 +351,28 @@ $ echo -n "20010db800010002cafe000000001337" | perl -ne 'printf join(":", unpack
</Tabs>
### Service Lookups for Consul Enterprise <EnterpriseAlert inline />
By default, all service lookups use the `default` namespace
within the partition and datacenter of the Consul agent that received the DNS query.
Use the following query format to specify a namespace, partition, and/or datacenter
for all service lookup types except `.query`,
including `.service`, `.connect`, `.virtual`, and `.ingress`.
At least two of those three fields (`namespace`, `partition`, `datacenter`)
must be specified.
```text
[<tag>.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
```
Consul server agents are in the `default` partition.
If DNS queries are addressed to Consul server agents,
service lookups to non-`default` partitions must explicitly specify
the partition of the target service.
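For example, a lookup that scopes the placeholder service `web` to namespace `ns1`, partition `part1`, and datacenter `dc1`, using the default DNS port:

```shell-session
$ dig @127.0.0.1 -p 8600 web.service.ns1.ns.part1.ap.dc1.dc.consul SRV
```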
To look up services imported from a cluster peer,
use a [service virtual IP lookup for Consul Enterprise](#service-virtual-ip-lookups-for-consul-enterprise) instead.
### Prepared Query Lookups
The format of a prepared query lookup is:
@ -398,7 +437,21 @@ of a service imported from that peer.
The virtual IP is also added to the service's [Tagged Addresses](/docs/discovery/services#tagged-addresses)
under the `consul-virtual` tag.
#### Service Virtual IP Lookups for Consul Enterprise <EnterpriseAlert inline />
By default, a service virtual IP lookup uses the `default` namespace
within the partition and datacenter of the Consul agent that received the DNS query.
To look up services imported from a cluster peered partition or open-source datacenter,
specify the namespace and peer name in the lookup:
```text
<service>.virtual[.<namespace>].<peer>.<domain>
```
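For example, with a placeholder service `backend-service` imported from the peer `cluster-02`, using the default DNS port:

```shell-session
$ dig @127.0.0.1 -p 8600 backend-service.virtual.cluster-02.consul
```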
To look up services not imported from a cluster peer,
refer to [service lookups for Consul Enterprise](#service-lookups-for-consul-enterprise) instead.
### Ingress Service Lookups
To find ingress-enabled services:
@ -480,38 +533,6 @@ using the [`advertise-wan`](/docs/agent/config/cli-flags#_advertise-wan) and
[`translate_wan_addrs`](/docs/agent/config/config-files#translate_wan_addrs) configuration
options.
## Namespaced/Partitioned Services and Nodes <EnterpriseAlert inline />
Consul Enterprise supports resolving namespaced and partitioned services via DNS.
The DNS server in Consul Enterprise can resolve services assigned to namespaces and partitions.
The DNS server can also resolve nodes assigned to partitions.
To maintain backwards compatibility existing queries can be used and these will
resolve services within the `default` namespace and partition. However, for resolving
services from other namespaces or partitions the following form can be used:
```text
[<tag>.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
```
This sequence is the canonical naming convention of a Consul Enterprise service. At least two of the following
fields must be present:
* `namespace`
* `partition`
* `datacenter`
For imported lookups, only the namespace and peer need to be specified as the partition can be inferred from the peering:
```text
<service>.virtual[.<namespace>].<peer>.<domain>
```
For node lookups, only the partition and datacenter need to be specified as nodes cannot be
namespaced.
```text
[<tag>.]<node>.node.<partition>.ap.<datacenter>.dc.<domain>
```
## DNS with ACLs
In order to use the DNS interface when

View File

@ -58,7 +58,7 @@ The partition in which [`proxy-defaults`](/docs/connect/config-entries/proxy-def
### Cross-partition Networking
You can configure services to be discoverable by downstream services in any partition within the datacenter. Specify the upstream services that you want to be available for discovery by configuring the `exported-services` configuration entry in the partition where the services are registered. Refer to the [`exported-services` documentation](/docs/connect/config-entries/exported-services) for details. Additionally, the `upstreams` configuration for proxies in the source partition must specify the name of the destination partition so that listeners can be created. Refer to the [Upstream Configuration Reference](/docs/connect/registration/service-registration#upstream-configuration-reference) for additional information.
You can configure services to be discoverable by downstream services in any partition within the datacenter. Specify the upstream services that you want to be available for discovery by configuring the `exported-services` configuration entry in the partition where the services are registered. Refer to the [`exported-services` documentation](/docs/connect/config-entries/exported-services) for details. Additionally, the requests made by downstream applications must use the correct DNS name for the service virtual IP lookup to occur. Service virtual IP lookups allow for communication across admin partitions when using transparent proxy. Refer to [Service Virtual IP Lookups for Consul Enterprise](/docs/discovery/dns#service-virtual-ip-lookups-for-consul-enterprise) for additional information.
## Requirements

View File

@ -17,7 +17,7 @@ description: >-
With Consul Enterprise v1.8.0+, audit logging can be used to capture a clear and
actionable log of authenticated events (both attempted and committed) that Consul
processes via its HTTP API. These events are compiled them into a JSON format for easy export
processes via its HTTP API. These events are then compiled into a JSON format for easy export
and contain a timestamp, the operation performed, and the user who initiated the action.
Audit logging enables security and compliance teams within an organization to get

View File

@ -99,12 +99,13 @@ Here are links to resources, documentation, examples and best practices to guide
- [Consul Telemetry Documentation](/docs/agent/telemetry)
- [Monitoring Consul with Datadog APM](https://www.datadoghq.com/blog/consul-datadog/)
- [Monitoring Consul with Dynatrace APM](https://www.dynatrace.com/news/blog/automatic-intelligent-observability-into-your-hashicorp-consul-service-mesh/)
- [Monitoring Consul with New Relic APM](https://newrelic.com/instant-observability/consul/b65825cc-faee-47b5-8d7c-6d60d6ab3c59)
- [Monitoring HCP Consul with New Relic APM](https://newrelic.com/instant-observability/hcp-consul/bc99ad15-7aba-450e-8236-6ea667d50cae)
**Logging**
- [Monitor Consul with Logz.io](https://www.hashicorp.com/integrations/logz-io/consul)
- [Monitor Consul with Splunk SignalFx](https://www.hashicorp.com/integrations/splunksignalfx/consul)
- [Consul Datacenter Monitoring with New Relic](https://www.hashicorp.com/integrations/new-relic/consul)
#### Platform:

View File

@ -6,10 +6,6 @@ description: Configuring Terminating Gateways on Kubernetes
# Terminating Gateways on Kubernetes
-> 1.9.0+: This feature is available in Consul versions 1.9.0 and higher
~> This topic requires familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway).
Adding a terminating gateway is a multi-step process:
- Update the Helm chart with terminating gateway config options
@ -17,7 +13,13 @@ Adding a terminating gateway is a multi-step process:
- Access the Consul agent
- Register external services with Consul
## Update the helm chart with terminating gateway config options
## Requirements
- [Consul](https://www.consul.io/docs/install#install-consul)
- [Consul on Kubernetes CLI](/docs/k8s/k8s-cli)
- Familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway)
## Update the Helm chart with terminating gateway config options
Minimum required Helm options:
@ -38,37 +40,41 @@ terminatingGateways:
## Deploying the Helm chart
Ensure you have the latest consul-helm chart and install Consul via helm using the following
[guide](/docs/k8s/installation/install#installing-consul) while being sure to provide the yaml configuration
as previously discussed.
The Helm chart may be deployed using the [Consul on Kubernetes CLI](/docs/k8s/k8s-cli).
```shell-session
$ consul-k8s install -f config.yaml
```
## Accessing the Consul agent
You can access the Consul server directly from your host via `kubectl port-forward`. This is helpful for interacting with your Consul UI locally as well as to validate connectivity of the application.
You can access the Consul server directly from your host via `kubectl port-forward`. This is helpful for interacting with your Consul UI locally as well as for validating the connectivity of the application.
<Tabs>
<Tab heading="Without TLS">
```shell-session
$ kubectl port-forward consul-server-0 8500 &
```
```shell-session
$ export CONSUL_HTTP_ADDR=http://localhost:8500
```
</Tab>
<Tab heading="With TLS">
If TLS is enabled, use port 8501:
```shell-session
$ kubectl port-forward consul-server-0 8501 &
```
-> Be sure the latest consul binary is installed locally on your host.
[https://releases.hashicorp.com/consul/](https://releases.hashicorp.com/consul/)
```shell-session
$ export CONSUL_HTTP_ADDR=http://localhost:8500
```
If TLS is enabled set:
```shell-session
$ export CONSUL_HTTP_ADDR=https://localhost:8501
$ export CONSUL_HTTP_SSL_VERIFY=false
```
</Tab>
</Tabs>
If ACLs are enabled, also set:
@ -88,34 +94,35 @@ Registering the external services with Consul is a multi-step process:
### Register external services with Consul
There are two ways to register an external service with Consul:
1. If [`TransparentProxy`](/docs/connect/transparent-proxy) is enabled, the preferred method is to declare external endpoints in the [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `ServiceDefaults`.
1. You can add the service as a node in the Consul catalog.
You may register an external service with Consul using `ServiceDefaults` if
[`TransparentProxy`](/docs/connect/transparent-proxy) is enabled. Otherwise,
you may register the service as a node in the Consul catalog.
#### Register an external service as a destination
<Tabs>
<Tab heading="Using ServiceDefaults and TransparentProxy">
The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial the external service directly. It is valid only in [`TransparentProxy`](/docs/connect/transparent-proxy)) mode.
The following table describes traffic behaviors when using `destination`s to route traffic through a terminating gateway:
The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial an external service directly. For this method to work, [`TransparentProxy`](/docs/connect/transparent-proxy) must be enabled.
The following table describes traffic behaviors when using the `destination` field to route traffic through a terminating gateway:
| External Services Layer | Client dials | Client uses TLS | Allowed | Notes |
|---|---|---|---|---|
| L4 | Hostname | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. |
| L4 | IP | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. |
| L4 | Hostname | No | Not allowed | The sidecar is not protocol aware and can not identify traffic going to the external service. |
| L4 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. |
| L7 | Hostname | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. |
| L7 | IP | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. |
| L7 | Hostname | No | Allowed | A `Host` or `:authority` header is required. |
| L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. |
| <nobr>External Services Layer</nobr> | <nobr>Client dials</nobr> | <nobr>Client uses TLS</nobr> | Allowed | Notes |
|--------------------------------------|---------------------------|------------------------------|--------------------------|-----------------------------------------------------------------------------------------------|
| L4 | Hostname | Yes | <nobr>Allowed</nobr> | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. |
| L4 | IP | Yes | <nobr>Allowed</nobr> | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. |
| L4 | Hostname | No | <nobr>Not allowed</nobr> | The sidecar is not protocol aware and can not identify traffic going to the external service. |
| L4 | IP | No | <nobr>Allowed</nobr> | There are no limitations on dialing IPs without TLS. |
| L7 | Hostname | Yes | <nobr>Not allowed</nobr> | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. |
| L7 | IP | Yes | <nobr>Not allowed</nobr> | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. |
| L7 | Hostname | No | <nobr>Allowed</nobr> | A `Host` or `:authority` header is required. |
| L7 | IP | No | <nobr>Allowed</nobr> | There are no limitations on dialing IPs without TLS. |
You can provide a `caFile` to secure traffic between unencrypted clients that connect to external services through the terminating gateway.
Refer to [Create the configuration entry for the terminating gateway](#create-the-configuration-entry-for-the-terminating-gateway) for details.
Also note that regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations.
-> **Note:** Regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations.
Create a `ServiceDefaults` custom resource for the external service:
<CodeBlockConfig filename="serviceDefaults.yaml">
<CodeBlockConfig filename="service-defaults.yaml">
```yaml
apiVersion: consul.hashicorp.com/v1alpha1
@ -135,14 +142,15 @@ Create a `ServiceDefaults` custom resource for the external service:
Apply the `ServiceDefaults` resource with `kubectl apply`:
```shell-session
$ kubectl apply --filename serviceDefaults.yaml
$ kubectl apply --filename service-defaults.yaml
```
All other terminating gateway operations can use the name of the `ServiceDefaults` in place of a typical Consul service name.
All other terminating gateway operations can use the name of the `ServiceDefaults` component, in this case "example-https", as a Consul service name.
#### Register an external service as a Catalog Node
</Tab>
<Tab heading="Using Consul catalog">
-> **Note:** Normal Consul services are registered with the Consul client on the node that
Normally, Consul services are registered with the Consul client on the node that
they're running on. Since this is an external service, there is no Consul node
to register it onto. Instead, we will make up a node name and register the
service to that node.
@ -191,14 +199,15 @@ If ACLs and TLS are enabled :
$ curl --request PUT --header "X-Consul-Token: $CONSUL_HTTP_TOKEN" --data @external.json --insecure $CONSUL_HTTP_ADDR/v1/catalog/register
true
```
</Tab>
</Tabs>
### Update terminating gateway ACL role if ACLs are enabled
If ACLs are enabled, update the terminating gateway ACL role to have `service: write` permissions on all of the services
being represented by the gateway:
being represented by the gateway.
- Create a new policy that includes these permissions
- Update the existing role to include the new policy
Create a new policy that includes the write permission for the service you created.
<CodeBlockConfig filename="write-policy.hcl">
@ -222,7 +231,7 @@ service "example-https" {
}
```
Now fetch the ID of the terminating gateway token
Fetch the ID of the terminating gateway token.
```shell-session
$ consul acl role list | grep -B 6 -- "- RELEASE_NAME-terminating-gateway-policy" | grep ID
@ -230,7 +239,7 @@ consul acl role list | grep -B 6 -- "- RELEASE_NAME-terminating-gateway-policy"
ID: <role id>
```
Update the terminating gateway acl token with the new policy
Update the terminating gateway ACL token with the new policy.
```shell-session
$ consul acl role update -id <role id> -policy-name example-https-write-policy
@ -269,8 +278,6 @@ Configure the [`caFile`](https://www.consul.io/docs/connect/config-entries/termi
- Consul Helm chart 0.43 or older
- An Envoy image with an alpine base image
For `ServiceDefaults` destinations, refer to [Register an external service as a destination](#register-an-external-service-as-a-destination).
Apply the `TerminatingGateway` resource with `kubectl apply`:
```shell-session
@ -306,7 +313,7 @@ $ kubectl apply --filename service-intentions.yaml
### Define the external services as upstreams for services in the mesh
Finally define and deploy the external services as upstreams for the internal mesh services that wish to talk to them.
As a final step, you may define and deploy the external services as upstreams for the internal mesh services that wish to talk to them.
An example deployment is provided which will serve as a static client for the terminating gateway service.
<CodeBlockConfig filename="static-client.yaml">
@ -355,33 +362,35 @@ spec:
</CodeBlockConfig>
Run the service via `kubectl apply`:
Deploy the service with `kubectl apply`.
```shell-session
$ kubectl apply --filename static-client.yaml
```
Wait for the service to be ready:
Wait for the service to be ready.
```shell-session
$ kubectl rollout status deploy static-client --watch
deployment "static-client" successfully rolled out
```
You can verify connectivity of the static-client and terminating gateway via a curl command:
You can verify connectivity of the static-client and terminating gateway via a curl command.
<CodeBlockConfig heading="External services registered with the Consul catalog">
```shell-session
$ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https.com" http://localhost:1234/
```
</CodeBlockConfig>
<CodeBlockConfig heading="External services registered with `ServiceDefaults` destinations">
<Tabs>
<Tab heading="Registered with `ServiceDefaults` destinations">
```shell-session
$ kubectl exec deploy/static-client -- curl -vvvs https://example.com/
```
</CodeBlockConfig>
</Tab>
<Tab heading="Registered with the Consul catalog">
```shell-session
$ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https.com" http://localhost:1234/
```
</Tab>
</Tabs>

View File

@ -72,7 +72,7 @@ service mesh.
}
```
1. Issue the `consul services register` command to store the configuration:
```shell-sesion
```shell-session
$ consul services register api-sidecar-proxy.hcl
```
1. Call the upstream service to invoke the Lambda function. In the following example, the `api` service invokes the `authentication` service at `localhost:2345`:
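A sketch of such a call, assuming the upstream is plain HTTP; the port matches the upstream address given above.

```shell-session
$ curl http://localhost:2345
```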

View File

@ -8,7 +8,7 @@ description: >-
# Consul API Gateway 0.1.0
## OVerview
## Overview
This is the first general availability (GA) release of Consul API Gateway. It
provides controlled access for network traffic from outside a Consul service

View File

@ -0,0 +1,47 @@
---
layout: docs
page_title: 0.47.x
description: >-
Consul on Kubernetes release notes for version 0.47.x
---
# Consul on Kubernetes 0.47.0
## Release Highlights
- **Cluster Peering (Beta)**: This release introduces support for Cluster Peering, which allows service connectivity between two independent clusters. Enabling peering will deploy the peering controllers and PeeringAcceptor and PeeringDialer CRDs. The new CRDs are used to establish a peering connection between two clusters. Refer to [Cluster Peering on Kubernetes](/docs/connect/cluster-peering/k8s) for full instructions on using Cluster Peering on Kubernetes.
- **Envoy Proxy Debugging CLI Commands**: This release introduces new commands to quickly identify proxies and troubleshoot Envoy proxies for sidecars and gateways.
* Add `consul-k8s proxy list` command for displaying pods running Envoy managed by Consul.
* Add `consul-k8s proxy read podname` command for displaying Envoy configuration for a given pod.
- **Transparent Proxy Egress**: Adds support for destinations on the Service Defaults CRD when using transparent proxy for terminating gateways.
## Supported Software
- Consul 1.11.x, Consul 1.12.x and Consul 1.13.1+
- Kubernetes 1.19-1.23
- Kubectl 1.21+
- Envoy proxy support is determined by the Consul version deployed. Refer to
[Envoy Integration](/docs/connect/proxies/envoy) for details.
## Upgrading
For detailed information on upgrading, please refer to the [Upgrades page](/docs/k8s/upgrade)
## Known Issues
The following issues are known to exist in the v0.47.0 and v0.47.1 releases:
- Kubernetes 1.24 is not supported because secret-based tokens are no longer autocreated by default for service accounts. Refer to GitHub issue
[[GH-1145](https://github.com/hashicorp/consul-k8s/issues/1145)] for
details.
## Changelogs
The changelogs for this major release version and any maintenance versions are listed below.
~> **Note:** The following links take you to the changelogs on the GitHub website.
- [0.47.0](https://github.com/hashicorp/consul-k8s/releases/tag/v0.47.0)
- [0.47.1](https://github.com/hashicorp/consul-k8s/releases/tag/v0.47.1)

View File

@ -24,6 +24,8 @@ description: >-
- Drops support for Envoy version 1.13.x.
- (Enterprise Only) Consul Enterprise has removed support for temporary licensing. All server agents must have a valid license at startup and client agents must have a license at startup or be able to retrieve one from the servers.
## Upgrading
For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-10-0) and the changelogs.
## Changelogs

View File

@ -27,6 +27,8 @@ description: >-
- Drops support for Envoy versions 1.15.x and 1.16.x
## Upgrading
For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-11-0) and the changelogs.
## Changelogs

View File

@ -0,0 +1,54 @@
---
layout: docs
page_title: 1.12.x
description: >-
Consul release notes for version 1.12.x
---
# Consul 1.12.0
## Release Highlights
- **AWS IAM Auth Method**: Consul now provides an AWS IAM auth method that allows AWS IAM roles and users to authenticate with Consul to obtain ACL tokens. Refer to [AWS IAM Auth Method](/docs/security/acl/auth-methods/aws-iam) for detailed configuration information.
- **Per listener TLS Config**: It is now possible to configure TLS differently for each of Consul's listeners, such as HTTPS, gRPC, and the internal multiplexed RPC listener, using the `tls` stanza. Refer to [TLS Configuration Reference](/docs/agent/config/config-files#tls-configuration-reference) for more details.
- **AWS Lambda**: Adds the ability to invoke AWS Lambdas through terminating gateways, which allows for cross-datacenter communication, transparent proxy, and intentions with Consul Service Mesh. Refer to [AWS Lambda](/docs/lambda) and [Invoke Lambda Functions](/docs/lambda/invocation) for more details.
- **Mesh-wide TLS min/max versions and cipher suites:** Using the [Mesh](/docs/connect/config-entries/mesh#tls) Config Entry or CRD, it is now possible to set TLS min/max versions and cipher suites for both inbound and outbound mTLS connections.
- **Expanded details for ACL Permission Denied errors**: Details are now provided when permission denied errors surface for RPC calls. Details include the accessor ID of the ACL token, the missing permission, and the namespace or partition in which the error occurred.
- **ACL token read**: The `consul acl token read -rules` command now includes an `-expanded` option to display detailed info about any policies and rules affecting the token. Refer to [Consul ACL Token read](/commands/acl/token/read) for more details; a brief example follows this list.
- **Automatically reload agent config when config files change**: When using the `auto-reload-config` CLI flag or `auto_reload_config` agent config option, Consul now automatically reloads the [reloadable configuration options](/docs/agent/config#reloadable-configuration) when configuration files change. Refer to [auto_reload_config](/docs/agent/config/cli-flags#_auto_reload_config) for more details.
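A sketch of reading a token with expanded policy and rule details (the accessor ID is a placeholder):

```shell-session
$ consul acl token read -id <accessor-id> -expanded
```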
## What's Changed
- Removes support for Envoy 1.17.x and Envoy 1.18.x, and adds support for Envoy 1.21.x and Envoy 1.22.x. Refer to the [Envoy Compatibility matrix](/docs/connect/proxies/envoy) for more details.
- The `disable_compat_1.9` option now defaults to `true`. Metrics formatted in the style of version 1.9, such as `consul.http...`, can still be enabled by setting `disable_compat_1.9 = false`. However, these metrics will be removed in 1.13.
- The `agent_master` ACL token has been renamed to the `agent_recovery` ACL token. In addition, the `consul acl set-agent-token master` command has been replaced with `consul acl set-agent-token recovery`; a brief example follows this list. Refer to [ACL Agent Recovery Token](/docs/security/acl/acl-tokens#acl-agent-recovery-token) and [Consul ACL Set Agent Token](/commands/acl/set-agent-token) for more information.
- If TLS min and max versions are not specified, they default to the following values. For details on how to configure TLS min and max, refer to the [Mesh TLS config entry](/docs/connect/config-entries/mesh#tls) or CRD documentation.
- Incoming connections: TLS 1.2 for the min version, TLS 1.3 for the max version
- Outgoing connections: TLS 1.2 for both the min and max versions.
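As an illustration of the renamed recovery token command mentioned above (the token value is a placeholder):

```shell-session
$ consul acl set-agent-token recovery "<recovery-token-secret>"
```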
## Upgrading
For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-12-0) and the changelogs.
## Changelogs
The changelogs for this major release version and any maintenance versions are listed below.
-> **Note**: These links take you to the changelogs on the GitHub website.
- [1.12.0](https://github.com/hashicorp/consul/releases/tag/v1.12.0)
- [1.12.1](https://github.com/hashicorp/consul/releases/tag/v1.12.1)
- [1.12.2](https://github.com/hashicorp/consul/releases/tag/v1.12.2)
- [1.12.3](https://github.com/hashicorp/consul/releases/tag/v1.12.3)
- [1.12.4](https://github.com/hashicorp/consul/releases/tag/v1.12.4)

View File

@ -0,0 +1,44 @@
---
layout: docs
page_title: 1.13.x
description: >-
Consul release notes for version 1.13.x
---
# Consul 1.13.0
## Release Highlights
- **Cluster Peering (Beta)**: This version adds a new model to federate Consul clusters for both service mesh and traditional service discovery. Cluster peering allows for service interconnectivity with looser coupling than the existing WAN federation. For more information, refer to the [cluster peering](/docs/connect/cluster-peering) documentation.
- **Transparent proxying through terminating gateways**: This version adds egress traffic control to destinations outside of Consul's catalog, such as APIs on the public internet. Transparent proxies can dial [destinations defined in service-defaults](/docs/connect/config-entries/service-defaults#destination) and have the traffic routed through terminating gateways. For more information, refer to the [terminating gateway](/docs/connect/gateways/terminating-gateway#terminating-gateway-configuration) documentation.
- **Enables TLS on the Envoy Prometheus endpoint**: The Envoy Prometheus endpoint can be enabled when `envoy_prometheus_bind_addr` is set and then secured over TLS using new CLI flags for the `consul connect envoy` command: `-prometheus-ca-file`, `-prometheus-ca-path`, `-prometheus-cert-file`, and `-prometheus-key-file`. The CA, cert, and key can be provided to Envoy by a Kubernetes mounted volume so that Envoy can watch the files and dynamically reload the certs when the volume is updated. A brief example follows this list.
- **UDP Health Checks**: Adds the ability to register service discovery health checks that periodically send UDP datagrams to the specified IP/hostname and port. Refer to [UDP checks](/docs/discovery/checks#udp-interval) for details.
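A sketch of the new Prometheus TLS flags in use (the service name and certificate paths are hypothetical):

```shell-session
$ consul connect envoy -sidecar-for web \
    -prometheus-ca-file /consul/tls/prometheus-ca.pem \
    -prometheus-cert-file /consul/tls/prometheus-cert.pem \
    -prometheus-key-file /consul/tls/prometheus-key.pem
```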
## What's Changed
- Removes support for Envoy 1.19.x and adds support for Envoy 1.23. Refer to the [Envoy Compatibility matrix](/docs/connect/proxies/envoy) for more details.
- The [`disable_compat_19`](/docs/agent/options#telemetry-disable_compat_1.9) telemetry configuration option is now removed. In Consul versions 1.10.x through 1.11.x, the config defaulted to `false`. In version 1.12.x it defaulted to `true`. Before upgrading, remove this flag from your configuration if it is in use; a quick check is sketched below.
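One way to check whether the flag is still present before upgrading, assuming agent configuration lives under `/etc/consul.d`:

```shell-session
$ grep -r "disable_compat_1.9" /etc/consul.d/
```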
## Upgrading
For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-13-0) and the changelogs.
## Known Issues
The following issues are known to exist in the 1.13.0 release:
- Consul 1.13.1 fixes a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul. Refer to GitHub issue [[GH-14149](https://github.com/hashicorp/consul/issues/14149)] for more details.
- Consul 1.13.0 and Consul 1.13.1 default to requiring TLS for gRPC communication with Envoy proxies when auto-encrypt and auto-config are enabled. In environments where Envoy proxies are not already configured to use TLS for gRPC, upgrading to Consul 1.13 will cause Envoy proxies to disconnect from the control plane (Consul agents). A future patch release will disable TLS by default for gRPC communication with Envoy proxies when using Service Mesh and auto-config or auto-encrypt. Refer to GitHub issue [GH-14253](https://github.com/hashicorp/consul/issues/14253) and [Service Mesh deployments using auto-encrypt or auto-config](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more details.
## Changelogs
The changelogs for this major release version and any maintenance versions are listed below.
-> **Note**: These links take you to the changelogs on the GitHub website.
- [1.13.0](https://github.com/hashicorp/consul/releases/tag/v1.13.0)
- [1.13.1](https://github.com/hashicorp/consul/releases/tag/v1.13.1)