Merge branch 'main' into jkirschner-hashicorp-patch-3

Jared Kirschner 2022-06-15 00:06:40 -04:00 committed by GitHub
commit bd68f0f6f6
117 changed files with 3988 additions and 1370 deletions

.changelog/13357.txt

@ -0,0 +1,4 @@
```release-note:feature
agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands
to report this. Agent also reports build date in log on startup.
```
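
A quick way to see the new field once an agent built with this change is running: the agent_endpoint.go hunk further down adds `BuildDate` (RFC3339) to the `Config` block of `/v1/agent/self`. A minimal sketch, assuming the default local HTTP address; the struct mirrors only the fields it needs:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// self mirrors just the part of the /v1/agent/self response this sketch cares about.
type self struct {
	Config struct {
		Version   string
		BuildDate string // RFC3339, per the agent_endpoint.go change in this commit
	}
}

func main() {
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/self")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s self
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("version=%s build date=%s\n", s.Config.Version, s.Config.BuildDate)
}
```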

.changelog/13421.txt

@ -0,0 +1,3 @@
```release-note:improvement
dns: Added support for specifying admin partition in node lookups.
```
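
For context on this DNS change (the agent/dns.go hunk near the end of this diff), a sketch of a partition-qualified node lookup against a local agent. The label layout (`<node>.node.<partition>.ap.<domain>`) and the default DNS port 8600 are assumptions, not taken from this diff; the node and partition names are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Point the resolver at the agent's DNS interface.
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 2 * time.Second}
			return d.DialContext(ctx, network, "127.0.0.1:8600")
		},
	}

	// Hypothetical node "mynode" in admin partition "team-a".
	addrs, err := r.LookupHost(context.Background(), "mynode.node.team-a.ap.consul")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(addrs)
}
```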

.changelog/13431.txt

@ -0,0 +1,3 @@
```release-note:improvement
connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5)
```


@ -4,4 +4,7 @@ export GIT_COMMIT=$(git rev-parse --short HEAD)
export GIT_COMMIT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD)
export GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
export GIT_IMPORT=github.com/hashicorp/consul/version
export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY}"
# we're using this for build date because it's stable across platform builds
# the env -i and -noprofile are used to ensure we don't try to recursively call this profile when starting bash
export GIT_DATE=$(env -i /bin/bash --noprofile -norc ${CIRCLE_WORKING_DIRECTORY}/build-support/scripts/build-date.sh)
export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.BuildDate=${GIT_DATE}"
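
This script and the GNUmakefile change below feed the date into the binary through the Go linker's `-X` flag. A standalone sketch of that mechanism, using an illustrative variable rather than Consul's real `version` package:

```go
// Build with the date injected at link time, e.g.:
//
//	go build -ldflags "-X main.buildDate=$(date -u +%Y-%m-%dT%H:%M:%SZ)" .
package main

import (
	"fmt"
	"time"
)

// buildDate is empty in source; the linker overwrites it via -X.
var buildDate string

func main() {
	if t, err := time.Parse(time.RFC3339, buildDate); err == nil {
		fmt.Println("built at", t.UTC())
	} else {
		fmt.Println("no build date injected:", err)
	}
}
```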


@ -30,7 +30,7 @@ references:
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
ubuntu: &UBUNTU_CI_IMAGE ubuntu-2004:202201-02
cache:
yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}
yarn: &YARN_CACHE_KEY consul-ui-v8-{{ checksum "ui/yarn.lock" }}
steps:
install-gotestsum: &install-gotestsum
@ -852,13 +852,13 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
envoy-integration-test-1_19_3: &ENVOY_TESTS
envoy-integration-test-1_19_5: &ENVOY_TESTS
machine:
image: *UBUNTU_CI_IMAGE
parallelism: 4
resource_class: medium
environment:
ENVOY_VERSION: "1.19.3"
ENVOY_VERSION: "1.19.5"
steps: &ENVOY_INTEGRATION_TEST_STEPS
- checkout
# Get go binary from workspace
@ -891,20 +891,20 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
envoy-integration-test-1_20_2:
envoy-integration-test-1_20_4:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.20.2"
ENVOY_VERSION: "1.20.4"
envoy-integration-test-1_21_1:
envoy-integration-test-1_21_3:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.21.1"
ENVOY_VERSION: "1.21.3"
envoy-integration-test-1_22_0:
envoy-integration-test-1_22_2:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.22.0"
ENVOY_VERSION: "1.22.2"
# run integration tests for the connect ca providers
test-connect-ca-providers:
@ -1131,16 +1131,16 @@ workflows:
- nomad-integration-0_8:
requires:
- dev-build
- envoy-integration-test-1_19_3:
- envoy-integration-test-1_19_5:
requires:
- dev-build
- envoy-integration-test-1_20_2:
- envoy-integration-test-1_20_4:
requires:
- dev-build
- envoy-integration-test-1_21_1:
- envoy-integration-test-1_21_3:
requires:
- dev-build
- envoy-integration-test-1_22_0:
- envoy-integration-test-1_22_2:
requires:
- dev-build
- compatibility-integration-test:


@ -6,7 +6,7 @@ set -uo pipefail
### It is still up to the reviewer to make sure that any tests added are needed and meaningful.
# search for any "new" or modified metric emissions
metrics_modified=$(git --no-pager diff HEAD origin/main | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
metrics_modified=$(git --no-pager diff origin/main...HEAD | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
# search for PR body or title metric references
metrics_in_pr_body=$(echo "${PR_BODY-""}" | grep -i "metric")
metrics_in_pr_title=$(echo "${PR_TITLE-""}" | grep -i "metric")


@ -15,6 +15,7 @@ jobs:
runs-on: ubuntu-latest
outputs:
product-version: ${{ steps.get-product-version.outputs.product-version }}
product-date: ${{ steps.get-product-version.outputs.product-date }}
pre-version: ${{ steps.get-product-version.outputs.pre-version }}
pkg-version: ${{ steps.get-product-version.outputs.pkg-version }}
shared-ldflags: ${{ steps.shared-ldflags.outputs.shared-ldflags }}
@ -24,6 +25,7 @@ jobs:
id: get-product-version
run: |
CONSUL_VERSION=$(build-support/scripts/version.sh -r)
CONSUL_DATE=$(build-support/scripts/build-date.sh)
## TODO: This assumes `make version` outputs 1.1.1+ent-prerel
IFS="+" read VERSION _other <<< "$CONSUL_VERSION"
IFS="-" read _other PREREL_VERSION <<< "$CONSUL_VERSION"
@ -32,12 +34,15 @@ jobs:
## [version]{-prerelease}+ent before then, we'll need to add
## logic to handle presence/absence of the prerelease
echo "::set-output name=product-version::${CONSUL_VERSION}"
echo "::set-output name=product-date::${CONSUL_DATE}"
echo "::set-output name=pre-version::${PREREL_VERSION}"
echo "::set-output name=pkg-version::${VERSION}"
- name: Set shared -ldflags
id: shared-ldflags
run: echo "::set-output name=shared-ldflags::-X github.com/hashicorp/consul/version.GitCommit=${GITHUB_SHA::8} -X github.com/hashicorp/consul/version.GitDescribe=${{ steps.get-product-version.outputs.product-version }}"
run: |
T="github.com/hashicorp/consul/version"
echo "::set-output name=shared-ldflags::-X ${T}.GitCommit=${GITHUB_SHA::8} -X ${T}.GitDescribe=${{ steps.get-product-version.outputs.product-version }} -X ${T}.BuildDate=${{ steps.get-product-version.outputs.product-date }}"
generate-metadata-file:
needs: get-product-version
@ -95,9 +100,11 @@ jobs:
- name: Build UI
run: |
CONSUL_VERSION=${{ needs.get-product-version.outputs.product-version }}
CONSUL_DATE=${{ needs.get-product-version.outputs.product-date }}
CONSUL_BINARY_TYPE=${CONSUL_BINARY_TYPE}
CONSUL_COPYRIGHT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD)
echo "consul_version is ${CONSUL_VERSION}"
echo "consul_date is ${CONSUL_DATE}"
echo "consul binary type is ${CONSUL_BINARY_TYPE}"
echo "consul copyright year is ${CONSUL_COPYRIGHT_YEAR}"
cd ui && make && cd ..


@ -25,7 +25,9 @@ GIT_COMMIT?=$(shell git rev-parse --short HEAD)
GIT_COMMIT_YEAR?=$(shell git show -s --format=%cd --date=format:%Y HEAD)
GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
GIT_IMPORT=github.com/hashicorp/consul/version
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)
DATE_FORMAT="%Y-%m-%dT%H:%M:%SZ" # it's tricky to do an RFC3339 format in a cross platform way, so we hardcode UTC
GIT_DATE=$(shell $(CURDIR)/build-support/scripts/build-date.sh) # we're using this for build date because it's stable across platform builds
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).BuildDate=$(GIT_DATE)
ifeq ($(FORCE_REBUILD),1)
NOCACHE=--no-cache


@ -91,6 +91,7 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
Revision string
Server bool
Version string
BuildDate string
}{
Datacenter: s.agent.config.Datacenter,
PrimaryDatacenter: s.agent.config.PrimaryDatacenter,
@ -100,8 +101,10 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
Revision: s.agent.config.Revision,
Server: s.agent.config.ServerMode,
// We expect the ent version to be part of the reported version string, and that's now part of the metadata, not the actual version.
Version: s.agent.config.VersionWithMetadata(),
Version: s.agent.config.VersionWithMetadata(),
BuildDate: s.agent.config.BuildDate.Format(time.RFC3339),
}
return Self{
Config: config,
DebugConfig: s.agent.config.Sanitized(),


@ -804,6 +804,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
Version: stringVal(c.Version),
VersionPrerelease: stringVal(c.VersionPrerelease),
VersionMetadata: stringVal(c.VersionMetadata),
// What is a sensible default for BuildDate?
BuildDate: timeValWithDefault(c.BuildDate, time.Date(1970, 1, 00, 00, 00, 01, 0, time.UTC)),
// consul configuration
ConsulCoordinateUpdateBatchSize: intVal(c.Consul.Coordinate.UpdateBatchSize),
@ -1946,6 +1948,13 @@ func stringVal(v *string) string {
return *v
}
func timeValWithDefault(v *time.Time, defaultVal time.Time) time.Time {
if v == nil {
return defaultVal
}
return *v
}
func float64ValWithDefault(v *float64, defaultVal float64) float64 {
if v == nil {
return defaultVal


@ -3,6 +3,7 @@ package config
import (
"encoding/json"
"fmt"
"time"
"github.com/hashicorp/consul/agent/consul"
@ -261,18 +262,19 @@ type Config struct {
SnapshotAgent map[string]interface{} `mapstructure:"snapshot_agent"`
// non-user configurable values
AEInterval *string `mapstructure:"ae_interval"`
CheckDeregisterIntervalMin *string `mapstructure:"check_deregister_interval_min"`
CheckReapInterval *string `mapstructure:"check_reap_interval"`
Consul Consul `mapstructure:"consul"`
Revision *string `mapstructure:"revision"`
SegmentLimit *int `mapstructure:"segment_limit"`
SegmentNameLimit *int `mapstructure:"segment_name_limit"`
SyncCoordinateIntervalMin *string `mapstructure:"sync_coordinate_interval_min"`
SyncCoordinateRateTarget *float64 `mapstructure:"sync_coordinate_rate_target"`
Version *string `mapstructure:"version"`
VersionPrerelease *string `mapstructure:"version_prerelease"`
VersionMetadata *string `mapstructure:"version_metadata"`
AEInterval *string `mapstructure:"ae_interval"`
CheckDeregisterIntervalMin *string `mapstructure:"check_deregister_interval_min"`
CheckReapInterval *string `mapstructure:"check_reap_interval"`
Consul Consul `mapstructure:"consul"`
Revision *string `mapstructure:"revision"`
SegmentLimit *int `mapstructure:"segment_limit"`
SegmentNameLimit *int `mapstructure:"segment_name_limit"`
SyncCoordinateIntervalMin *string `mapstructure:"sync_coordinate_interval_min"`
SyncCoordinateRateTarget *float64 `mapstructure:"sync_coordinate_rate_target"`
Version *string `mapstructure:"version"`
VersionPrerelease *string `mapstructure:"version_prerelease"`
VersionMetadata *string `mapstructure:"version_metadata"`
BuildDate *time.Time `mapstructure:"build_date"`
// Enterprise Only
Audit Audit `mapstructure:"audit"`


@ -2,6 +2,7 @@ package config
import (
"strconv"
"time"
"github.com/hashicorp/raft"
@ -197,8 +198,8 @@ func NonUserSource() Source {
# SegmentNameLimit is the maximum segment name length.
segment_name_limit = 64
connect = {
connect = {
# 0s causes the value to be ignored and operate without capping
# the max time before leaf certs can be generated after a roots change.
test_ca_leaf_root_change_spread = "0s"
@ -210,7 +211,7 @@ func NonUserSource() Source {
// versionSource creates a config source for the version parameters.
// This should be merged in the tail since these values are not
// user configurable.
func versionSource(rev, ver, verPre, meta string) Source {
func versionSource(rev, ver, verPre, meta string, buildDate time.Time) Source {
return LiteralSource{
Name: "version",
Config: Config{
@ -218,6 +219,7 @@ func versionSource(rev, ver, verPre, meta string) Source {
Version: &ver,
VersionPrerelease: &verPre,
VersionMetadata: &meta,
BuildDate: &buildDate,
},
}
}
@ -225,7 +227,8 @@ func versionSource(rev, ver, verPre, meta string) Source {
// defaultVersionSource returns the version config source for the embedded
// version numbers.
func defaultVersionSource() Source {
return versionSource(version.GitCommit, version.Version, version.VersionPrerelease, version.VersionMetadata)
buildDate, _ := time.Parse(time.RFC3339, version.BuildDate) // This has been checked elsewhere
return versionSource(version.GitCommit, version.Version, version.VersionPrerelease, version.VersionMetadata, buildDate)
}
// DefaultConsulSource returns the default configuration for the consul agent.


@ -62,6 +62,7 @@ type RuntimeConfig struct {
Version string
VersionPrerelease string
VersionMetadata string
BuildDate time.Time
// consul config
ConsulCoordinateUpdateMaxBatches int
@ -1700,6 +1701,10 @@ func sanitize(name string, v reflect.Value) reflect.Value {
x := v.Interface().(time.Duration)
return reflect.ValueOf(x.String())
case isTime(typ):
x := v.Interface().(time.Time)
return reflect.ValueOf(x.String())
case isString(typ):
if strings.HasPrefix(name, "RetryJoinLAN[") || strings.HasPrefix(name, "RetryJoinWAN[") {
x := v.Interface().(string)
@ -1771,6 +1776,7 @@ func sanitize(name string, v reflect.Value) reflect.Value {
}
func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) }
func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map }
func isNetAddr(t reflect.Type) bool { return t.Implements(reflect.TypeOf((*net.Addr)(nil)).Elem()) }
func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr }


@ -5661,6 +5661,7 @@ func TestLoad_FullConfig(t *testing.T) {
Version: "R909Hblt",
VersionPrerelease: "ZT1JOQLn",
VersionMetadata: "GtTCa13",
BuildDate: time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC),
// consul configuration
ConsulCoordinateUpdateBatchSize: 128,
@ -6447,7 +6448,8 @@ func TestLoad_FullConfig(t *testing.T) {
ConfigFiles: []string{"testdata/full-config." + format},
HCL: []string{fmt.Sprintf(`data_dir = "%s"`, dataDir)},
}
opts.Overrides = append(opts.Overrides, versionSource("JNtPSav3", "R909Hblt", "ZT1JOQLn", "GtTCa13"))
opts.Overrides = append(opts.Overrides, versionSource("JNtPSav3", "R909Hblt", "ZT1JOQLn", "GtTCa13",
time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)))
r, err := Load(opts)
require.NoError(t, err)
prototest.AssertDeepEqual(t, expected, r.RuntimeConfig)
@ -6641,6 +6643,7 @@ func parseCIDR(t *testing.T, cidr string) *net.IPNet {
func TestRuntimeConfig_Sanitize(t *testing.T) {
rt := RuntimeConfig{
BindAddr: &net.IPAddr{IP: net.ParseIP("127.0.0.1")},
BuildDate: time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC),
CheckOutputMaxSize: checks.DefaultBufSize,
SerfAdvertiseAddrLAN: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 5678},
DNSAddrs: []net.Addr{


@ -76,6 +76,7 @@
"BindAddr": "127.0.0.1",
"Bootstrap": false,
"BootstrapExpect": 0,
"BuildDate": "2019-11-20 05:00:00 +0000 UTC",
"Cache": {
"EntryFetchMaxBurst": 42,
"EntryFetchRate": 0.334,


@ -1,6 +1,7 @@
package connect
import (
"fmt"
"net/url"
"github.com/hashicorp/consul/acl"
@ -23,10 +24,6 @@ func (id SpiffeIDService) MatchesPartition(partition string) bool {
return id.PartitionOrDefault() == acl.PartitionOrDefault(partition)
}
func (id SpiffeIDService) PartitionOrDefault() string {
return acl.PartitionOrDefault(id.Partition)
}
// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDService) URI() *url.URL {
var result url.URL
@ -35,3 +32,20 @@ func (id SpiffeIDService) URI() *url.URL {
result.Path = id.uriPath()
return &result
}
func (id SpiffeIDService) uriPath() string {
path := fmt.Sprintf("/ns/%s/dc/%s/svc/%s",
id.NamespaceOrDefault(),
id.Datacenter,
id.Service,
)
// Although OSS has no support for partitions, it still needs to be able to
// handle exportedPartition from peered Consul Enterprise clusters in order
// to generate the correct SpiffeID.
// We intentionally avoid using pbpartition.DefaultName here to be OSS friendly.
if ap := id.PartitionOrDefault(); ap != "" && ap != "default" {
return "/ap/" + ap + path
}
return path
}
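
A self-contained sketch of what the new `uriPath` above produces. The types and helpers below only mirror this hunk for illustration; they are not the real `connect` package:

```go
package main

import (
	"fmt"
	"strings"
)

type spiffeIDService struct {
	Host, Partition, Namespace, Datacenter, Service string
}

func (id spiffeIDService) partitionOrDefault() string {
	if id.Partition == "" {
		return "default"
	}
	return strings.ToLower(id.Partition)
}

func (id spiffeIDService) uriPath() string {
	ns := id.Namespace
	if ns == "" {
		ns = "default"
	}
	path := fmt.Sprintf("/ns/%s/dc/%s/svc/%s", ns, id.Datacenter, id.Service)
	// Non-default partitions (e.g. exported from a peered Enterprise cluster)
	// get an /ap/<partition> prefix; the default partition stays implicit.
	if ap := id.partitionOrDefault(); ap != "" && ap != "default" {
		return "/ap/" + ap + path
	}
	return path
}

func main() {
	svc := spiffeIDService{Host: "1234.consul", Datacenter: "dc1", Service: "web"}
	fmt.Println("spiffe://" + svc.Host + svc.uriPath()) // .../ns/default/dc/dc1/svc/web

	svc.Partition = "other"
	fmt.Println("spiffe://" + svc.Host + svc.uriPath()) // .../ap/other/ns/default/dc/dc1/svc/web
}
```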


@ -4,7 +4,7 @@
package connect
import (
"fmt"
"strings"
"github.com/hashicorp/consul/acl"
)
@ -15,10 +15,14 @@ func (id SpiffeIDService) GetEnterpriseMeta() *acl.EnterpriseMeta {
return &acl.EnterpriseMeta{}
}
func (id SpiffeIDService) uriPath() string {
return fmt.Sprintf("/ns/%s/dc/%s/svc/%s",
id.NamespaceOrDefault(),
id.Datacenter,
id.Service,
)
// PartitionOrDefault breaks from OSS's pattern of returning empty strings.
// Although OSS has no support for partitions, it still needs to be able to
// handle exportedPartition from peered Consul Enterprise clusters in order
// to generate the correct SpiffeID.
func (id SpiffeIDService) PartitionOrDefault() string {
if id.Partition == "" {
return "default"
}
return strings.ToLower(id.Partition)
}


@ -19,16 +19,6 @@ func TestSpiffeIDServiceURI(t *testing.T) {
require.Equal(t, "spiffe://1234.consul/ns/default/dc/dc1/svc/web", svc.URI().String())
})
t.Run("partitions are ignored", func(t *testing.T) {
svc := &SpiffeIDService{
Host: "1234.consul",
Partition: "other",
Datacenter: "dc1",
Service: "web",
}
require.Equal(t, "spiffe://1234.consul/ns/default/dc/dc1/svc/web", svc.URI().String())
})
t.Run("namespaces are ignored", func(t *testing.T) {
svc := &SpiffeIDService{
Host: "1234.consul",


@ -57,6 +57,12 @@ func (s *Server) revokeEnterpriseLeadership() error {
return nil
}
func (s *Server) startTenancyDeferredDeletion(ctx context.Context) {
}
func (s *Server) stopTenancyDeferredDeletion() {
}
func (s *Server) validateEnterpriseRequest(entMeta *acl.EnterpriseMeta, write bool) error {
return nil
}


@ -47,6 +47,9 @@ var LeaderSummaries = []prometheus.SummaryDefinition{
const (
newLeaderEvent = "consul:new-leader"
barrierWriteTimeout = 2 * time.Minute
defaultDeletionRoundBurst int = 5 // number of replication round bursts
defaultDeletionApplyRate rate.Limit = 10 // raft applies per second
)
var (
@ -313,6 +316,8 @@ func (s *Server) establishLeadership(ctx context.Context) error {
s.startPeeringStreamSync(ctx)
s.startDeferredDeletion(ctx)
if err := s.startConnectLeader(ctx); err != nil {
return err
}
@ -751,6 +756,16 @@ func (s *Server) stopACLReplication() {
s.leaderRoutineManager.Stop(aclTokenReplicationRoutineName)
}
func (s *Server) startDeferredDeletion(ctx context.Context) {
s.startPeeringDeferredDeletion(ctx)
s.startTenancyDeferredDeletion(ctx)
}
func (s *Server) stopDeferredDeletion() {
s.leaderRoutineManager.Stop(peeringDeletionRoutineName)
s.stopTenancyDeferredDeletion()
}
func (s *Server) startConfigReplication(ctx context.Context) {
if s.config.PrimaryDatacenter == "" || s.config.PrimaryDatacenter == s.config.Datacenter {
// replication shouldn't run in the primary DC


@ -12,12 +12,17 @@ import (
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/pbpeering"
)
@ -114,18 +119,24 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
for _, peer := range peers {
logger.Trace("evaluating stored peer", "peer", peer.Name, "should_dial", peer.ShouldDial(), "sequence_id", seq)
if !peer.ShouldDial() {
if !peer.IsActive() {
// The peering was marked for deletion by ourselves or our peer, no need to dial or track them.
continue
}
// TODO(peering) Account for deleted peers that are still in the state store
// Track all active peerings, since the reconciliation loop below applies to the token generator as well.
stored[peer.ID] = struct{}{}
if !peer.ShouldDial() {
// We do not need to dial peerings where we generated the peering token.
continue
}
status, found := s.peeringService.StreamStatus(peer.ID)
// TODO(peering): If there is new peering data and a connected stream, should we tear down the stream?
// If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid.
// Alternatively we could do a basic Ping from the initiate peering endpoint to avoid dealing with that here.
// Alternatively we could do a basic Ping from the establish peering endpoint to avoid dealing with that here.
if found && status.Connected {
// Nothing to do when we already have an active stream to the peer.
continue
@ -179,6 +190,8 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
}
func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error {
logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)
tlsOption := grpc.WithInsecure()
if len(peer.PeerCAPems) > 0 {
var haveCerts bool
@ -208,7 +221,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
buffer = buffer.Next()
}
logger.Trace("establishing stream to peer", "peer_id", peer.ID)
logger.Trace("establishing stream to peer")
retryCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
@ -224,7 +237,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
}
logger.Trace("dialing peer", "peer_id", peer.ID, "addr", addr)
logger.Trace("dialing peer", "addr", addr)
conn, err := grpc.DialContext(retryCtx, addr,
grpc.WithContextDialer(newPeerDialer(addr)),
grpc.WithBlock(),
@ -241,16 +254,23 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
return err
}
err = s.peeringService.HandleStream(peering.HandleStreamRequest{
streamReq := peering.HandleStreamRequest{
LocalID: peer.ID,
RemoteID: peer.PeerID,
PeerName: peer.Name,
Partition: peer.Partition,
Stream: stream,
})
}
err = s.peeringService.HandleStream(streamReq)
// A nil error indicates that the peering was deleted and the stream needs to be gracefully shut down.
if err == nil {
stream.CloseSend()
s.peeringService.DrainStream(streamReq)
// This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream.
cancel()
logger.Info("closed outbound stream")
}
return err
@ -282,3 +302,156 @@ func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, err
return conn, nil
}
}
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}
// runPeeringDeletions watches for peerings marked for deletion and then cleans up data for them.
func (s *Server) runPeeringDeletions(ctx context.Context) error {
logger := s.loggers.Named(logging.Peering)
// This limiter's purpose is to control the rate of raft applies caused by the deferred deletion
// process. This includes deletion of the peerings themselves in addition to any peering data
raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
for {
ws := memdb.NewWatchSet()
state := s.fsm.State()
_, peerings, err := s.fsm.State().PeeringListDeleted(ws)
if err != nil {
logger.Warn("encountered an error while searching for deleted peerings", "error", err)
continue
}
if len(peerings) == 0 {
ws.Add(state.AbandonCh())
// wait for a peering to be deleted or the routine to be cancelled
if err := ws.WatchCtx(ctx); err != nil {
return err
}
continue
}
for _, p := range peerings {
s.removePeeringAndData(ctx, logger, raftLimiter, p)
}
}
}
// removePeeringAndData removes data imported for a peering and the peering itself.
func (s *Server) removePeeringAndData(ctx context.Context, logger hclog.Logger, limiter *rate.Limiter, peer *pbpeering.Peering) {
logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)
entMeta := *structs.NodeEnterpriseMetaInPartition(peer.Partition)
// First delete all imported data.
// By deleting all imported nodes we also delete all services and checks registered on them.
if err := s.deleteAllNodes(ctx, limiter, entMeta, peer.Name); err != nil {
logger.Error("Failed to remove Nodes for peer", "error", err)
return
}
if err := s.deleteTrustBundleFromPeer(ctx, limiter, entMeta, peer.Name); err != nil {
logger.Error("Failed to remove trust bundle for peer", "error", err)
return
}
if err := limiter.Wait(ctx); err != nil {
return
}
if peer.State == pbpeering.PeeringState_TERMINATED {
// For peerings terminated by our peer we only clean up the local data, we do not delete the peering itself.
// This is to avoid a situation where the peering disappears without the local operator's knowledge.
return
}
// Once all imported data is deleted, the peering itself is also deleted.
req := &pbpeering.PeeringDeleteRequest{
Name: peer.Name,
Partition: acl.PartitionOrDefault(peer.Partition),
}
_, err := s.raftApplyProtobuf(structs.PeeringDeleteType, req)
if err != nil {
logger.Error("failed to apply full peering deletion", "error", err)
return
}
}
// deleteAllNodes will delete all nodes in a partition or all nodes imported from a given peer name.
func (s *Server) deleteAllNodes(ctx context.Context, limiter *rate.Limiter, entMeta acl.EnterpriseMeta, peerName string) error {
// Same as ACL batch upsert size
nodeBatchSizeBytes := 256 * 1024
_, nodes, err := s.fsm.State().NodeDump(nil, &entMeta, peerName)
if err != nil {
return err
}
if len(nodes) == 0 {
return nil
}
i := 0
for {
var ops structs.TxnOps
for batchSize := 0; batchSize < nodeBatchSizeBytes && i < len(nodes); i++ {
entry := nodes[i]
op := structs.TxnOp{
Node: &structs.TxnNodeOp{
Verb: api.NodeDelete,
Node: structs.Node{
Node: entry.Node,
Partition: entry.Partition,
PeerName: entry.PeerName,
},
},
}
ops = append(ops, &op)
// Add entries to the transaction until it reaches the max batch size
batchSize += len(entry.Node) + len(entry.Partition) + len(entry.PeerName)
}
// Send each batch as a TXN Req to avoid sending one at a time
req := structs.TxnRequest{
Datacenter: s.config.Datacenter,
Ops: ops,
}
if len(req.Ops) > 0 {
if err := limiter.Wait(ctx); err != nil {
return err
}
_, err := s.raftApplyMsgpack(structs.TxnRequestType, &req)
if err != nil {
return err
}
} else {
break
}
}
return nil
}
// deleteTrustBundleFromPeer deletes the trust bundle imported from a peer, if present.
func (s *Server) deleteTrustBundleFromPeer(ctx context.Context, limiter *rate.Limiter, entMeta acl.EnterpriseMeta, peerName string) error {
_, bundle, err := s.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: peerName, EnterpriseMeta: entMeta})
if err != nil {
return err
}
if bundle == nil {
return nil
}
if err := limiter.Wait(ctx); err != nil {
return err
}
req := &pbpeering.PeeringTrustBundleDeleteRequest{
Name: peerName,
Partition: entMeta.PartitionOrDefault(),
}
_, err = s.raftApplyProtobuf(structs.PeeringTrustBundleDeleteType, req)
return err
}
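
The deletion routine above uses golang.org/x/time/rate to cap how quickly it issues raft applies. A minimal sketch of that throttling pattern in isolation; the loop body just prints, and the rate matches defaultDeletionApplyRate:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	const applyRate rate.Limit = 10 // applies per second
	limiter := rate.NewLimiter(applyRate, int(applyRate))

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	for i := 0; i < 15; i++ {
		// Wait blocks until the limiter allows another event or the context ends,
		// which is how the leader routine backs off between raft applies.
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("stopping:", err)
			return
		}
		fmt.Println("apply", i)
	}
}
```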


@ -7,6 +7,8 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
@ -88,10 +90,12 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
require.True(r, status.Connected)
})
// Delete the peering to trigger the termination sequence
require.NoError(t, s2.fsm.State().PeeringDelete(2000, state.Query{
Value: "my-peer-s1",
}))
// Delete the peering to trigger the termination sequence.
deleted := &pbpeering.Peering{
Name: "my-peer-s1",
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s2.fsm.State().PeeringWrite(2000, deleted))
s2.logger.Trace("deleted peering for my-peer-s1")
retry.Run(t, func(r *retry.R) {
@ -175,10 +179,12 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
require.True(r, status.Connected)
})
// Delete the peering from the server peer to trigger the termination sequence
require.NoError(t, s1.fsm.State().PeeringDelete(2000, state.Query{
Value: "my-peer-s2",
}))
// Delete the peering from the server peer to trigger the termination sequence.
deleted := &pbpeering.Peering{
Name: "my-peer-s2",
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s1.fsm.State().PeeringWrite(2000, deleted))
s2.logger.Trace("deleted peering for my-peer-s1")
retry.Run(t, func(r *retry.R) {
@ -186,7 +192,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
require.False(r, found)
})
// s2 should have received the termination message and updated the peering state
// s2 should have received the termination message and updated the peering state.
retry.Run(t, func(r *retry.R) {
_, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{
Value: "my-peer-s1",
@ -195,3 +201,159 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
})
}
func TestLeader_Peering_DeferredDeletion(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
// TODO(peering): Configure with TLS
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "s1.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")
var (
peerName = "my-peer-s2"
defaultMeta = acl.DefaultEnterpriseMeta()
lastIdx = uint64(0)
)
// Simulate a peering initiation event by writing a peering to the state store.
lastIdx++
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
Name: peerName,
}))
// Insert imported data: nodes, services, checks, trust bundle
lastIdx = insertTestPeeringData(t, s1.fsm.State(), peerName, lastIdx)
// Mark the peering for deletion to trigger the termination sequence.
lastIdx++
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
Name: peerName,
DeletedAt: structs.TimeToProto(time.Now()),
}))
// Ensure imported data is gone:
retry.Run(t, func(r *retry.R) {
_, csn, err := s1.fsm.State().ServiceDump(nil, "", false, defaultMeta, peerName)
require.NoError(r, err)
require.Len(r, csn, 0)
_, checks, err := s1.fsm.State().ChecksInState(nil, api.HealthAny, defaultMeta, peerName)
require.NoError(r, err)
require.Len(r, checks, 0)
_, nodes, err := s1.fsm.State().NodeDump(nil, defaultMeta, peerName)
require.NoError(r, err)
require.Len(r, nodes, 0)
_, tb, err := s1.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: peerName})
require.NoError(r, err)
require.Nil(r, tb)
})
// The leader routine should pick up the deletion and finish deleting the peering.
retry.Run(t, func(r *retry.R) {
_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
Value: peerName,
})
require.NoError(r, err)
require.Nil(r, peering)
})
}
func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastIdx uint64) uint64 {
lastIdx++
require.NoError(t, store.PeeringTrustBundleWrite(lastIdx, &pbpeering.PeeringTrustBundle{
TrustDomain: "952e6bd1-f4d6-47f7-83ff-84b31babaa17",
PeerName: peer,
RootPEMs: []string{"certificate bundle"},
}))
lastIdx++
require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
Node: "aaa",
Address: "10.0.0.1",
PeerName: peer,
Service: &structs.NodeService{
Service: "a-service",
ID: "a-service-1",
Port: 8080,
PeerName: peer,
},
Checks: structs.HealthChecks{
{
CheckID: "a-service-1-check",
ServiceName: "a-service",
ServiceID: "a-service-1",
Node: "aaa",
PeerName: peer,
},
{
CheckID: structs.SerfCheckID,
Node: "aaa",
PeerName: peer,
},
},
}))
lastIdx++
require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
Node: "bbb",
Address: "10.0.0.2",
PeerName: peer,
Service: &structs.NodeService{
Service: "b-service",
ID: "b-service-1",
Port: 8080,
PeerName: peer,
},
Checks: structs.HealthChecks{
{
CheckID: "b-service-1-check",
ServiceName: "b-service",
ServiceID: "b-service-1",
Node: "bbb",
PeerName: peer,
},
{
CheckID: structs.SerfCheckID,
Node: "bbb",
PeerName: peer,
},
},
}))
lastIdx++
require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
Node: "ccc",
Address: "10.0.0.3",
PeerName: peer,
Service: &structs.NodeService{
Service: "c-service",
ID: "c-service-1",
Port: 8080,
PeerName: peer,
},
Checks: structs.HealthChecks{
{
CheckID: "c-service-1-check",
ServiceName: "c-service",
ServiceID: "c-service-1",
Node: "ccc",
PeerName: peer,
},
{
CheckID: structs.SerfCheckID,
Node: "ccc",
PeerName: peer,
},
},
}))
return lastIdx
}


@ -148,11 +148,6 @@ func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
return err
}
func (a *peeringApply) PeeringDelete(req *pbpeering.PeeringDeleteRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringDeleteType, req)
return err
}
// TODO(peering): This needs RPC metrics interceptor since it's not triggered by an RPC.
func (a *peeringApply) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req)
@ -169,5 +164,10 @@ func (a *peeringApply) CatalogRegister(req *structs.RegisterRequest) error {
return err
}
func (a *peeringApply) CatalogDeregister(req *structs.DeregisterRequest) error {
_, err := a.srv.leaderRaftApply("Catalog.Deregister", structs.DeregisterRequestType, req)
return err
}
var _ peering.Apply = (*peeringApply)(nil)
var _ peering.LeaderAddress = (*leaderAddr)(nil)


@ -126,6 +126,7 @@ const (
backgroundCAInitializationRoutineName = "CA initialization"
virtualIPCheckRoutineName = "virtual IP version check"
peeringStreamsRoutineName = "streaming peering resources"
peeringDeletionRoutineName = "peering deferred deletion"
)
var (


@ -42,6 +42,15 @@ func peeringTableSchema() *memdb.TableSchema {
prefixIndex: prefixIndexFromQueryNoNamespace,
},
},
indexDeleted: {
Name: indexDeleted,
AllowMissing: false,
Unique: false,
Indexer: indexerSingle{
readIndex: indexDeletedFromBoolQuery,
writeIndex: indexDeletedFromPeering,
},
},
},
}
}
@ -82,6 +91,17 @@ func indexIDFromPeering(raw interface{}) ([]byte, error) {
return b.Bytes(), nil
}
func indexDeletedFromPeering(raw interface{}) ([]byte, error) {
p, ok := raw.(*pbpeering.Peering)
if !ok {
return nil, fmt.Errorf("unexpected type %T for *pbpeering.Peering index", raw)
}
var b indexBuilder
b.Bool(!p.IsActive())
return b.Bytes(), nil
}
func (s *Store) PeeringReadByID(ws memdb.WatchSet, id string) (uint64, *pbpeering.Peering, error) {
tx := s.db.ReadTxn()
defer tx.Abort()
@ -205,10 +225,19 @@ func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error {
}
if existing != nil {
// Prevent modifications to Peering marked for deletion
if !existing.IsActive() {
return fmt.Errorf("cannot write to peering that is marked for deletion")
}
p.CreateIndex = existing.CreateIndex
p.ID = existing.ID
} else {
if !p.IsActive() {
return fmt.Errorf("cannot create a new peering marked for deletion")
}
// TODO(peering): consider keeping PeeringState enum elsewhere?
p.State = pbpeering.PeeringState_INITIAL
p.CreateIndex = idx
@ -230,8 +259,6 @@ func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error {
return tx.Commit()
}
// TODO(peering): replace with deferred deletion since this operation
// should involve cleanup of data associated with the peering.
func (s *Store) PeeringDelete(idx uint64, q Query) error {
tx := s.db.WriteTxn(idx)
defer tx.Abort()
@ -245,6 +272,10 @@ func (s *Store) PeeringDelete(idx uint64, q Query) error {
return nil
}
if existing.(*pbpeering.Peering).IsActive() {
return fmt.Errorf("cannot delete a peering without first marking for deletion")
}
if err := tx.Delete(tablePeering, existing); err != nil {
return fmt.Errorf("failed deleting peering: %v", err)
}
@ -499,7 +530,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en
if idx > maxIdx {
maxIdx = idx
}
if peering == nil {
if peering == nil || !peering.IsActive() {
continue
}
peerings = append(peerings, peering)
@ -734,3 +765,28 @@ func peersForServiceTxn(
}
return idx, results, nil
}
func (s *Store) PeeringListDeleted(ws memdb.WatchSet) (uint64, []*pbpeering.Peering, error) {
tx := s.db.ReadTxn()
defer tx.Abort()
return peeringListDeletedTxn(tx, ws)
}
func peeringListDeletedTxn(tx ReadTxn, ws memdb.WatchSet) (uint64, []*pbpeering.Peering, error) {
iter, err := tx.Get(tablePeering, indexDeleted, BoolQuery{Value: true})
if err != nil {
return 0, nil, fmt.Errorf("failed peering lookup: %v", err)
}
// Instead of watching iter.WatchCh() we only need to watch the index entry for the peering table
// This is sufficient to pick up any changes to peerings.
idx := maxIndexWatchTxn(tx, ws, tablePeering)
var result []*pbpeering.Peering
for t := iter.Next(); t != nil; t = iter.Next() {
result = append(result, t.(*pbpeering.Peering))
}
return idx, result, nil
}


@ -0,0 +1,85 @@
//go:build !consulent
// +build !consulent
package state
import (
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
func testIndexerTablePeering() map[string]indexerTestCase {
id := "432feb2f-5476-4ae2-b33c-e43640ca0e86"
encodedID := []byte{0x43, 0x2f, 0xeb, 0x2f, 0x54, 0x76, 0x4a, 0xe2, 0xb3, 0x3c, 0xe4, 0x36, 0x40, 0xca, 0xe, 0x86}
obj := &pbpeering.Peering{
Name: "TheName",
ID: id,
DeletedAt: structs.TimeToProto(time.Now()),
}
return map[string]indexerTestCase{
indexID: {
read: indexValue{
source: "432feb2f-5476-4ae2-b33c-e43640ca0e86",
expected: encodedID,
},
write: indexValue{
source: obj,
expected: encodedID,
},
},
indexName: {
read: indexValue{
source: Query{
Value: "TheNAME",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition("pArTition"),
},
expected: []byte("thename\x00"),
},
write: indexValue{
source: obj,
expected: []byte("thename\x00"),
},
prefix: []indexValue{
{
source: *structs.DefaultEnterpriseMetaInPartition("pArTition"),
expected: nil,
},
},
},
indexDeleted: {
read: indexValue{
source: BoolQuery{
Value: true,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition("partITION"),
},
expected: []byte("\x01"),
},
write: indexValue{
source: obj,
expected: []byte("\x01"),
},
extra: []indexerTestCase{
{
read: indexValue{
source: BoolQuery{
Value: false,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition("partITION"),
},
expected: []byte("\x00"),
},
write: indexValue{
source: &pbpeering.Peering{
Name: "TheName",
Partition: "PartItioN",
},
expected: []byte("\x00"),
},
},
},
},
}
}


@ -4,6 +4,7 @@ import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
@ -235,8 +236,8 @@ func TestStore_Peering_Watch(t *testing.T) {
// foo write should fire watch
lastIdx++
err = s.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "foo",
State: pbpeering.PeeringState_FAILING,
Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()),
})
require.NoError(t, err)
require.True(t, watchFired(ws))
@ -245,28 +246,36 @@ func TestStore_Peering_Watch(t *testing.T) {
idx, p, err := s.PeeringRead(ws, Query{Value: "foo"})
require.NoError(t, err)
require.Equal(t, lastIdx, idx)
require.Equal(t, pbpeering.PeeringState_FAILING, p.State)
require.False(t, p.IsActive())
})
t.Run("delete fires watch", func(t *testing.T) {
// watch on existing foo
ws := newWatch(t, Query{Value: "foo"})
ws := newWatch(t, Query{Value: "bar"})
// delete on bar shouldn't fire watch
lastIdx++
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{Name: "bar"}))
lastIdx++
require.NoError(t, s.PeeringDelete(lastIdx, Query{Value: "bar"}))
require.NoError(t, s.PeeringDelete(lastIdx, Query{Value: "foo"}))
require.False(t, watchFired(ws))
// delete on foo should fire watch
// mark for deletion before actually deleting
lastIdx++
err := s.PeeringDelete(lastIdx, Query{Value: "foo"})
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "bar",
DeletedAt: structs.TimeToProto(time.Now()),
})
require.NoError(t, err)
require.True(t, watchFired(ws))
// check foo is gone
idx, p, err := s.PeeringRead(ws, Query{Value: "foo"})
ws = newWatch(t, Query{Value: "bar"})
// delete on bar should fire watch
lastIdx++
err = s.PeeringDelete(lastIdx, Query{Value: "bar"})
require.NoError(t, err)
require.True(t, watchFired(ws))
// check bar is gone
idx, p, err := s.PeeringRead(ws, Query{Value: "bar"})
require.NoError(t, err)
require.Equal(t, lastIdx, idx)
require.Nil(t, p)
@ -320,13 +329,13 @@ func TestStore_PeeringList_Watch(t *testing.T) {
return ws
}
t.Run("insert fires watch", func(t *testing.T) {
testutil.RunStep(t, "insert fires watch", func(t *testing.T) {
ws := newWatch(t, acl.EnterpriseMeta{})
lastIdx++
// insert a peering
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "bar",
Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
})
require.NoError(t, err)
@ -341,27 +350,16 @@ func TestStore_PeeringList_Watch(t *testing.T) {
require.Len(t, pp, count)
})
t.Run("update fires watch", func(t *testing.T) {
// set up initial write
lastIdx++
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "foo",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
})
require.NoError(t, err)
count++
testutil.RunStep(t, "update fires watch", func(t *testing.T) {
ws := newWatch(t, acl.EnterpriseMeta{})
// update peering
lastIdx++
err = s.PeeringWrite(lastIdx, &pbpeering.Peering{
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "foo",
State: pbpeering.PeeringState_FAILING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
})
require.NoError(t, err)
}))
require.True(t, watchFired(ws))
idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{})
@ -370,21 +368,12 @@ func TestStore_PeeringList_Watch(t *testing.T) {
require.Len(t, pp, count)
})
t.Run("delete fires watch", func(t *testing.T) {
// set up initial write
lastIdx++
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "baz",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
})
require.NoError(t, err)
count++
testutil.RunStep(t, "delete fires watch", func(t *testing.T) {
ws := newWatch(t, acl.EnterpriseMeta{})
// delete peering
lastIdx++
err = s.PeeringDelete(lastIdx, Query{Value: "baz"})
err := s.PeeringDelete(lastIdx, Query{Value: "foo"})
require.NoError(t, err)
count--
@ -398,14 +387,22 @@ func TestStore_PeeringList_Watch(t *testing.T) {
}
func TestStore_PeeringWrite(t *testing.T) {
// Note that all test cases in this test share a state store and must be run sequentially.
// Each case depends on the previous.
s := NewStateStore(nil)
insertTestPeerings(t, s)
type testcase struct {
name string
input *pbpeering.Peering
name string
input *pbpeering.Peering
expectErr string
}
run := func(t *testing.T, tc testcase) {
require.NoError(t, s.PeeringWrite(10, tc.input))
err := s.PeeringWrite(10, tc.input)
if tc.expectErr != "" {
testutil.RequireErrorContains(t, err, tc.expectErr)
return
}
require.NoError(t, err)
q := Query{
Value: tc.input.Name,
@ -414,6 +411,7 @@ func TestStore_PeeringWrite(t *testing.T) {
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.NotNil(t, p)
if tc.input.State == 0 {
require.Equal(t, pbpeering.PeeringState_INITIAL, p.State)
}
@ -428,16 +426,46 @@ func TestStore_PeeringWrite(t *testing.T) {
},
},
{
name: "update foo",
name: "update baz",
input: &pbpeering.Peering{
Name: "foo",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
{
name: "mark baz for deletion",
input: &pbpeering.Peering{
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
{
name: "cannot update peering marked for deletion",
input: &pbpeering.Peering{
Name: "baz",
// Attempt to add metadata
Meta: map[string]string{
"source": "kubernetes",
},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
expectErr: "cannot write to peering that is marked for deletion",
},
{
name: "cannot create peering marked for deletion",
input: &pbpeering.Peering{
Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
expectErr: "cannot create a new peering marked for deletion",
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
testutil.RunStep(t, tc.name, func(t *testing.T) {
run(t, tc)
})
}
@ -495,13 +523,25 @@ func TestStore_PeeringDelete(t *testing.T) {
s := NewStateStore(nil)
insertTestPeerings(t, s)
q := Query{Value: "foo"}
testutil.RunStep(t, "cannot delete without marking for deletion", func(t *testing.T) {
q := Query{Value: "foo"}
err := s.PeeringDelete(10, q)
testutil.RequireErrorContains(t, err, "cannot delete a peering without first marking for deletion")
})
require.NoError(t, s.PeeringDelete(10, q))
testutil.RunStep(t, "can delete after marking for deletion", func(t *testing.T) {
require.NoError(t, s.PeeringWrite(11, &pbpeering.Peering{
Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()),
}))
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.Nil(t, p)
q := Query{Value: "foo"}
require.NoError(t, s.PeeringDelete(12, q))
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.Nil(t, p)
})
}
func TestStore_PeeringTerminateByID(t *testing.T) {
@ -903,10 +943,14 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
}
func TestStateStore_PeeringsForService(t *testing.T) {
type testPeering struct {
peering *pbpeering.Peering
delete bool
}
type testCase struct {
name string
services []structs.ServiceName
peerings []*pbpeering.Peering
peerings []testPeering
entry *structs.ExportedServicesConfigEntry
query []string
expect [][]*pbpeering.Peering
@ -918,12 +962,24 @@ func TestStateStore_PeeringsForService(t *testing.T) {
var lastIdx uint64
// Create peerings
for _, peering := range tc.peerings {
for _, tp := range tc.peerings {
lastIdx++
require.NoError(t, s.PeeringWrite(lastIdx, peering))
require.NoError(t, s.PeeringWrite(lastIdx, tp.peering))
// New peerings can't be marked for deletion so there is a two step process
// of first creating the peering and then marking it for deletion by setting DeletedAt.
if tp.delete {
lastIdx++
copied := pbpeering.Peering{
Name: tp.peering.Name,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s.PeeringWrite(lastIdx, &copied))
}
// make sure it got created
q := Query{Value: peering.Name}
q := Query{Value: tp.peering.Name}
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.NotNil(t, p)
@ -976,20 +1032,73 @@ func TestStateStore_PeeringsForService(t *testing.T) {
services: []structs.ServiceName{
{Name: "foo"},
},
peerings: []*pbpeering.Peering{},
peerings: []testPeering{},
entry: nil,
query: []string{"foo"},
expect: [][]*pbpeering.Peering{{}},
},
{
name: "peerings marked for deletion are excluded",
services: []structs.ServiceName{
{Name: "foo"},
},
peerings: []testPeering{
{
peering: &pbpeering.Peering{
Name: "peer1",
State: pbpeering.PeeringState_INITIAL,
},
},
{
peering: &pbpeering.Peering{
Name: "peer2",
},
delete: true,
},
},
entry: &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{
{
Name: "foo",
Consumers: []structs.ServiceConsumer{
{
PeerName: "peer1",
},
{
PeerName: "peer2",
},
},
},
},
},
query: []string{"foo"},
expect: [][]*pbpeering.Peering{
{
{Name: "peer1", State: pbpeering.PeeringState_INITIAL},
},
},
expectIdx: uint64(6), // config entries max index
},
{
name: "config entry with exact service name",
services: []structs.ServiceName{
{Name: "foo"},
{Name: "bar"},
},
peerings: []*pbpeering.Peering{
{Name: "peer1", State: pbpeering.PeeringState_INITIAL},
{Name: "peer2", State: pbpeering.PeeringState_INITIAL},
peerings: []testPeering{
{
peering: &pbpeering.Peering{
Name: "peer1",
State: pbpeering.PeeringState_INITIAL,
},
},
{
peering: &pbpeering.Peering{
Name: "peer2",
State: pbpeering.PeeringState_INITIAL,
},
},
},
entry: &structs.ExportedServicesConfigEntry{
Name: "default",
@ -1029,10 +1138,25 @@ func TestStateStore_PeeringsForService(t *testing.T) {
{Name: "foo"},
{Name: "bar"},
},
peerings: []*pbpeering.Peering{
{Name: "peer1", State: pbpeering.PeeringState_INITIAL},
{Name: "peer2", State: pbpeering.PeeringState_INITIAL},
{Name: "peer3", State: pbpeering.PeeringState_INITIAL},
peerings: []testPeering{
{
peering: &pbpeering.Peering{
Name: "peer1",
State: pbpeering.PeeringState_INITIAL,
},
},
{
peering: &pbpeering.Peering{
Name: "peer2",
State: pbpeering.PeeringState_INITIAL,
},
},
{
peering: &pbpeering.Peering{
Name: "peer3",
State: pbpeering.PeeringState_INITIAL,
},
},
},
entry: &structs.ExportedServicesConfigEntry{
Name: "default",
@ -1269,7 +1393,10 @@ func TestStore_TrustBundleListByService(t *testing.T) {
testutil.RunStep(t, "deleting the peering excludes its trust bundle", func(t *testing.T) {
lastIdx++
require.NoError(t, store.PeeringDelete(lastIdx, Query{Value: "peer1"}))
require.NoError(t, store.PeeringWrite(lastIdx, &pbpeering.Peering{
Name: "peer1",
DeletedAt: structs.TimeToProto(time.Now()),
}))
require.True(t, watchFired(ws))
ws = memdb.NewWatchSet()
@ -1294,3 +1421,62 @@ func TestStore_TrustBundleListByService(t *testing.T) {
require.Equal(t, []string{"peer-root-2"}, resp[0].RootPEMs)
})
}
func TestStateStore_Peering_ListDeleted(t *testing.T) {
s := testStateStore(t)
// Insert one active peering and two marked for deletion.
{
tx := s.db.WriteTxn(0)
defer tx.Abort()
err := tx.Insert(tablePeering, &pbpeering.Peering{
Name: "foo",
Partition: acl.DefaultPartitionName,
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
DeletedAt: structs.TimeToProto(time.Now()),
CreateIndex: 1,
ModifyIndex: 1,
})
require.NoError(t, err)
err = tx.Insert(tablePeering, &pbpeering.Peering{
Name: "bar",
Partition: acl.DefaultPartitionName,
ID: "5ebcff30-5509-4858-8142-a8e580f1863f",
CreateIndex: 2,
ModifyIndex: 2,
})
require.NoError(t, err)
err = tx.Insert(tablePeering, &pbpeering.Peering{
Name: "baz",
Partition: acl.DefaultPartitionName,
ID: "432feb2f-5476-4ae2-b33c-e43640ca0e86",
DeletedAt: structs.TimeToProto(time.Now()),
CreateIndex: 3,
ModifyIndex: 3,
})
require.NoError(t, err)
err = tx.Insert(tableIndex, &IndexEntry{
Key: tablePeering,
Value: 3,
})
require.NoError(t, err)
require.NoError(t, tx.Commit())
}
idx, deleted, err := s.PeeringListDeleted(nil)
require.NoError(t, err)
require.Equal(t, uint64(3), idx)
require.Len(t, deleted, 2)
var names []string
for _, peering := range deleted {
names = append(names, peering.Name)
}
require.ElementsMatch(t, []string{"foo", "baz"}, names)
}


@ -64,7 +64,10 @@ type IndexEntry struct {
Value uint64
}
const tableIndex = "index"
const (
tableIndex = "index"
indexDeleted = "deleted"
)
// indexTableSchema returns a new table schema used for tracking the latest
// raft index for a table or entities within a table.
@ -115,3 +118,14 @@ func indexFromString(raw interface{}) ([]byte, error) {
b.String(strings.ToLower(q))
return b.Bytes(), nil
}
func indexDeletedFromBoolQuery(raw interface{}) ([]byte, error) {
q, ok := raw.(BoolQuery)
if !ok {
return nil, fmt.Errorf("unexpected type %T for BoolQuery index", raw)
}
var b indexBuilder
b.Bool(q.Value)
return b.Bytes(), nil
}
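
The `deleted` index added in the peering schema keys rows on a single byte written by `indexBuilder.Bool`. A standalone sketch of that encoding (the helper below is illustrative, not the real indexBuilder); the indexer test in this commit expects exactly these byte values:

```go
package main

import "fmt"

// boolIndexBytes mimics how a boolean is flattened into an index key.
func boolIndexBytes(v bool) []byte {
	if v {
		return []byte{1}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", boolIndexBytes(true))  // "\x01" — marked for deletion
	fmt.Printf("%q\n", boolIndexBytes(false)) // "\x00" — active peering
}
```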


@ -56,6 +56,8 @@ func TestNewDBSchema_Indexers(t *testing.T) {
tableTombstones: testIndexerTableTombstones,
// config
tableConfigEntries: testIndexerTableConfigEntries,
// peerings
tablePeering: testIndexerTablePeering,
}
addEnterpriseIndexerTestCases(testcases)


@ -107,6 +107,14 @@ type serviceLookup struct {
acl.EnterpriseMeta
}
type nodeLookup struct {
Datacenter string
Node string
Tag string
MaxRecursionLevel int
acl.EnterpriseMeta
}
// DNSServer is used to wrap an Agent and expose various
// service discovery endpoints using a DNS interface.
type DNSServer struct {
@ -846,13 +854,27 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
return invalid()
}
if !d.parseDatacenter(querySuffixes, &datacenter) {
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
return invalid()
}
// Namespace should not be set for node queries
ns := entMeta.NamespaceOrEmpty()
if ns != "" && ns != acl.DefaultNamespaceName {
return invalid()
}
// Allow a "." in the node name, just join all the parts
node := strings.Join(queryParts, ".")
return d.nodeLookup(cfg, datacenter, node, req, resp, maxRecursionLevel)
lookup := nodeLookup{
Datacenter: datacenter,
Node: node,
MaxRecursionLevel: maxRecursionLevel,
EnterpriseMeta: entMeta,
}
return d.nodeLookup(cfg, lookup, req, resp)
case "query":
// ensure we have a query name
@ -959,7 +981,7 @@ func rCodeFromError(err error) int {
}
// nodeLookup is used to handle a node query
func (d *DNSServer) nodeLookup(cfg *dnsConfig, datacenter, node string, req, resp *dns.Msg, maxRecursionLevel int) error {
func (d *DNSServer) nodeLookup(cfg *dnsConfig, lookup nodeLookup, req, resp *dns.Msg) error {
// Only handle ANY, A, AAAA, and TXT type requests
qType := req.Question[0].Qtype
if qType != dns.TypeANY && qType != dns.TypeA && qType != dns.TypeAAAA && qType != dns.TypeTXT {
@ -968,12 +990,13 @@ func (d *DNSServer) nodeLookup(cfg *dnsConfig, datacenter, node string, req, res
// Make an RPC request
args := &structs.NodeSpecificRequest{
Datacenter: datacenter,
Node: node,
Datacenter: lookup.Datacenter,
Node: lookup.Node,
QueryOptions: structs.QueryOptions{
Token: d.agent.tokens.UserToken(),
AllowStale: cfg.AllowStale,
},
EnterpriseMeta: lookup.EnterpriseMeta,
}
out, err := d.lookupNode(cfg, args)
if err != nil {
@ -996,7 +1019,7 @@ func (d *DNSServer) nodeLookup(cfg *dnsConfig, datacenter, node string, req, res
q := req.Question[0]
// Only compute A and CNAME record if query is not TXT type
if qType != dns.TypeTXT {
records := d.makeRecordFromNode(n, q.Qtype, q.Name, cfg.NodeTTL, maxRecursionLevel)
records := d.makeRecordFromNode(n, q.Qtype, q.Name, cfg.NodeTTL, lookup.MaxRecursionLevel)
resp.Answer = append(resp.Answer, records...)
}


@ -41,8 +41,10 @@ func newLoggerForRequest(l Logger, req *pbsubscribe.SubscribeRequest) Logger {
return l.With(
"topic", req.Topic.String(),
"dc", req.Datacenter,
"peer", req.PeerName,
"key", req.Key,
"namespace", req.Namespace,
"partition", req.Partition,
"request_index", req.Index,
"stream_id", &streamID{})
}


@ -104,7 +104,7 @@ func init() {
registerEndpoint("/v1/operator/autopilot/health", []string{"GET"}, (*HTTPHandlers).OperatorServerHealth)
registerEndpoint("/v1/operator/autopilot/state", []string{"GET"}, (*HTTPHandlers).OperatorAutopilotState)
registerEndpoint("/v1/peering/token", []string{"POST"}, (*HTTPHandlers).PeeringGenerateToken)
registerEndpoint("/v1/peering/initiate", []string{"POST"}, (*HTTPHandlers).PeeringInitiate)
registerEndpoint("/v1/peering/establish", []string{"POST"}, (*HTTPHandlers).PeeringEstablish)
registerEndpoint("/v1/peering/", []string{"GET", "DELETE"}, (*HTTPHandlers).PeeringEndpoint)
registerEndpoint("/v1/peerings", []string{"GET"}, (*HTTPHandlers).PeeringList)
registerEndpoint("/v1/query", []string{"GET", "POST"}, (*HTTPHandlers).PreparedQueryGeneral)


@ -62,6 +62,36 @@ func TestIntentionList(t *testing.T) {
ids = append(ids, reply)
}
// set up an intention for a peered service
// TODO(peering): when we handle Upserts, we can use the for loop above. But it may be that we
// rip out legacy intentions before supporting that use case so run a config entry request instead here.
{
configEntryIntention := structs.ServiceIntentionsConfigEntry{
Kind: structs.ServiceIntentions,
Name: "bar",
Sources: []*structs.SourceIntention{
{
Name: "peered",
Peer: "peer1",
Action: structs.IntentionActionAllow,
},
},
}
req, err := http.NewRequest("PUT", "/v1/config", jsonReader(configEntryIntention))
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.ConfigApply(resp, req)
require.NoError(t, err)
if applied, ok := obj.(bool); ok {
require.True(t, applied)
} else {
t.Fatal("ConfigApply did not return a boolean type")
}
}
// Request
req, err := http.NewRequest("GET", "/v1/connect/intentions", nil)
require.NoError(t, err)
@ -71,22 +101,27 @@ func TestIntentionList(t *testing.T) {
require.NoError(t, err)
value := obj.(structs.Intentions)
require.Len(t, value, 4)
require.Len(t, value, 5)
require.Equal(t, []string{"bar->db", "foo->db", "zim->gir", "*->db"},
require.Equal(t, []string{"bar->db", "foo->db", "zim->gir", "peered->bar", "*->db"},
[]string{
value[0].SourceName + "->" + value[0].DestinationName,
value[1].SourceName + "->" + value[1].DestinationName,
value[2].SourceName + "->" + value[2].DestinationName,
value[3].SourceName + "->" + value[3].DestinationName,
value[4].SourceName + "->" + value[4].DestinationName,
})
require.Equal(t, []string{ids[2], ids[1], "", ids[0]},
require.Equal(t, []string{ids[2], ids[1], "", "", ids[0]},
[]string{
value[0].ID,
value[1].ID,
value[2].ID,
value[3].ID,
value[4].ID,
})
// check that a source peer exists for the intention of the peered service
require.Equal(t, "peer1", value[3].SourcePeer)
})
}


@ -107,30 +107,30 @@ func (s *HTTPHandlers) PeeringGenerateToken(resp http.ResponseWriter, req *http.
return out.ToAPI(), nil
}
// PeeringInitiate handles POSTs to the /v1/peering/initiate endpoint. The request
// PeeringEstablish handles POSTs to the /v1/peering/establish endpoint. The request
// will always be forwarded via RPC to the local leader.
func (s *HTTPHandlers) PeeringInitiate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
func (s *HTTPHandlers) PeeringEstablish(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Body == nil {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "The peering arguments must be provided in the body"}
}
apiRequest := &api.PeeringInitiateRequest{
apiRequest := &api.PeeringEstablishRequest{
Datacenter: s.agent.config.Datacenter,
}
if err := lib.DecodeJSON(req.Body, apiRequest); err != nil {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Body decoding failed: %v", err)}
}
args := pbpeering.NewInitiateRequestFromAPI(apiRequest)
args := pbpeering.NewEstablishRequestFromAPI(apiRequest)
if args.PeerName == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeerName is required in the payload when initiating a peering."}
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeerName is required in the payload when establishing a peering."}
}
if args.PeeringToken == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeeringToken is required in the payload when initiating a peering."}
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeeringToken is required in the payload when establishing a peering."}
}
out, err := s.agent.rpcClientPeering.Initiate(req.Context(), args)
out, err := s.agent.rpcClientPeering.Establish(req.Context(), args)
if err != nil {
return nil, err
}
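Per the handler above, /v1/peering/establish expects a JSON body with PeerName and PeeringToken. A minimal client-side sketch, assuming a local agent on 127.0.0.1:8500 and a token already generated by the accepting cluster (both assumptions, not part of this change):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, err := json.Marshal(map[string]string{
		"PeerName":     "peer1-usw1",
		"PeeringToken": "<base64 token from the accepting cluster>",
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://127.0.0.1:8500/v1/peering/establish",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```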

View file

@ -17,6 +17,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
)
@ -114,7 +115,7 @@ func TestHTTP_Peering_GenerateToken(t *testing.T) {
})
}
func TestHTTP_Peering_Initiate(t *testing.T) {
func TestHTTP_Peering_Establish(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
@ -125,7 +126,7 @@ func TestHTTP_Peering_Initiate(t *testing.T) {
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
t.Run("No Body", func(t *testing.T) {
req, err := http.NewRequest("POST", "/v1/peering/initiate", nil)
req, err := http.NewRequest("POST", "/v1/peering/establish", nil)
require.NoError(t, err)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
@ -135,7 +136,7 @@ func TestHTTP_Peering_Initiate(t *testing.T) {
})
t.Run("Body Invalid", func(t *testing.T) {
req, err := http.NewRequest("POST", "/v1/peering/initiate", bytes.NewReader([]byte("abc")))
req, err := http.NewRequest("POST", "/v1/peering/establish", bytes.NewReader([]byte("abc")))
require.NoError(t, err)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
@ -145,7 +146,7 @@ func TestHTTP_Peering_Initiate(t *testing.T) {
})
t.Run("No Name", func(t *testing.T) {
req, err := http.NewRequest("POST", "/v1/peering/initiate",
req, err := http.NewRequest("POST", "/v1/peering/establish",
bytes.NewReader([]byte(`{}`)))
require.NoError(t, err)
resp := httptest.NewRecorder()
@ -156,7 +157,7 @@ func TestHTTP_Peering_Initiate(t *testing.T) {
})
t.Run("No Token", func(t *testing.T) {
req, err := http.NewRequest("POST", "/v1/peering/initiate",
req, err := http.NewRequest("POST", "/v1/peering/establish",
bytes.NewReader([]byte(`{"PeerName": "peer1-usw1"}`)))
require.NoError(t, err)
resp := httptest.NewRecorder()
@ -177,7 +178,7 @@ func TestHTTP_Peering_Initiate(t *testing.T) {
}
tokenJSON, _ := json.Marshal(&token)
tokenB64 := base64.StdEncoding.EncodeToString(tokenJSON)
body := &pbpeering.InitiateRequest{
body := &pbpeering.EstablishRequest{
PeerName: "peering-a",
PeeringToken: tokenB64,
Meta: map[string]string{"foo": "bar"},
@ -186,7 +187,7 @@ func TestHTTP_Peering_Initiate(t *testing.T) {
bodyBytes, err := json.Marshal(body)
require.NoError(t, err)
req, err := http.NewRequest("POST", "/v1/peering/initiate", bytes.NewReader(bodyBytes))
req, err := http.NewRequest("POST", "/v1/peering/establish", bytes.NewReader(bodyBytes))
require.NoError(t, err)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
@ -343,12 +344,14 @@ func TestHTTP_Peering_Delete(t *testing.T) {
require.Equal(t, "", resp.Body.String())
})
t.Run("now the token is deleted, a read should 404", func(t *testing.T) {
req, err := http.NewRequest("GET", "/v1/peering/foo", nil)
require.NoError(t, err)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusNotFound, resp.Code)
t.Run("now the token is deleted and reads should yield a 404", func(t *testing.T) {
retry.Run(t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/peering/foo", nil)
require.NoError(r, err)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(r, http.StatusNotFound, resp.Code)
})
})
t.Run("delete a token that does not exist", func(t *testing.T) {

View file

@ -32,6 +32,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e
snap.ConnectProxy.PassthroughUpstreams = make(map[UpstreamID]map[string]map[string]struct{})
snap.ConnectProxy.PassthroughIndices = make(map[string]indexedTarget)
snap.ConnectProxy.PeerUpstreamEndpoints = make(map[UpstreamID]structs.CheckServiceNodes)
snap.ConnectProxy.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
// Watch for root changes
err := s.dataSources.CARoots.Notify(ctx, &structs.DCSpecificRequest{

View file

@ -236,10 +236,11 @@ func TestManager_BasicLifecycle(t *testing.T) {
NewUpstreamID(&upstreams[1]): &upstreams[1],
NewUpstreamID(&upstreams[2]): &upstreams[2],
},
PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{},
PassthroughIndices: map[string]indexedTarget{},
PeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{},
PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{},
PassthroughIndices: map[string]indexedTarget{},
PeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{},
PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{},
},
PreparedQueryEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
WatchedServiceChecks: map[structs.ServiceID][]structs.CheckType{},
@ -296,10 +297,11 @@ func TestManager_BasicLifecycle(t *testing.T) {
NewUpstreamID(&upstreams[1]): &upstreams[1],
NewUpstreamID(&upstreams[2]): &upstreams[2],
},
PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{},
PassthroughIndices: map[string]indexedTarget{},
PeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{},
PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
PassthroughUpstreams: map[UpstreamID]map[string]map[string]struct{}{},
PassthroughIndices: map[string]indexedTarget{},
PeerTrustBundles: map[string]*pbpeering.PeeringTrustBundle{},
PeerUpstreamEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{},
},
PreparedQueryEndpoints: map[UpstreamID]structs.CheckServiceNodes{},
WatchedServiceChecks: map[structs.ServiceID][]structs.CheckType{},

View file

@ -83,7 +83,8 @@ type ConfigSnapshotUpstreams struct {
// PeerUpstreamEndpoints is a map of UpstreamID -> (set of IP addresses)
// and used to determine the backing endpoints of an upstream in another
// peer.
PeerUpstreamEndpoints map[UpstreamID]structs.CheckServiceNodes
PeerUpstreamEndpoints map[UpstreamID]structs.CheckServiceNodes
PeerUpstreamEndpointsUseHostnames map[UpstreamID]struct{}
}
// indexedTarget is used to associate the Raft modify index of a resource
@ -162,7 +163,8 @@ func (c *configSnapshotConnectProxy) isEmpty() bool {
len(c.IntentionUpstreams) == 0 &&
!c.PeeringTrustBundlesSet &&
!c.MeshConfigSet &&
len(c.PeerUpstreamEndpoints) == 0
len(c.PeerUpstreamEndpoints) == 0 &&
len(c.PeerUpstreamEndpointsUseHostnames) == 0
}
type configSnapshotTerminatingGateway struct {

View file

@ -51,6 +51,16 @@ func TestConfigSnapshotPeering(t testing.T) *ConfigSnapshot {
Service: "payments-sidecar-proxy",
Kind: structs.ServiceKindConnectProxy,
Port: 443,
TaggedAddresses: map[string]structs.ServiceAddress{
structs.TaggedAddressLAN: {
Address: "85.252.102.31",
Port: 443,
},
structs.TaggedAddressWAN: {
Address: "123.us-east-1.elb.notaws.com",
Port: 8443,
},
},
Connect: structs.ServiceConnect{
PeerMeta: &structs.PeeringServiceMeta{
SNI: []string{

View file

@ -97,7 +97,18 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
uid := UpstreamIDFromString(uidString)
upstreamsSnapshot.PeerUpstreamEndpoints[uid] = resp.Nodes
filteredNodes := hostnameEndpoints(
s.logger,
GatewayKey{ /*empty so it never matches*/ },
resp.Nodes,
)
if len(filteredNodes) > 0 {
upstreamsSnapshot.PeerUpstreamEndpoints[uid] = filteredNodes
upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
} else {
upstreamsSnapshot.PeerUpstreamEndpoints[uid] = resp.Nodes
delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
}
if s.kind != structs.ServiceKindConnectProxy || s.proxyCfg.Mode != structs.ProxyModeTransparent {
return nil
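The update handler above now splits peer upstream endpoints into hostname-addressed and IP-addressed sets, recording the former in PeerUpstreamEndpointsUseHostnames. A simplified sketch of that classification idea (not the actual hostnameEndpoints helper), using the addresses from the test fixture above:

```go
package main

import (
	"fmt"
	"net"
)

// isHostname treats anything that does not parse as an IP as a hostname.
func isHostname(addr string) bool {
	return net.ParseIP(addr) == nil
}

func main() {
	addrs := []string{"85.252.102.31", "123.us-east-1.elb.notaws.com"}
	for _, a := range addrs {
		fmt.Printf("%s hostname=%v\n", a, isHostname(a))
	}
}
```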

View file

@ -7,6 +7,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-hclog"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/protobuf/types/known/anypb"
@ -44,12 +45,7 @@ func makeServiceResponse(
return nil
}
var serviceName string
if strings.HasPrefix(update.CorrelationID, subExportedService) {
serviceName = strings.TrimPrefix(update.CorrelationID, subExportedService)
} else {
serviceName = strings.TrimPrefix(update.CorrelationID, subExportedProxyService) + syntheticProxyNameSuffix
}
serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService)
// If no nodes are present then it's due to one of:
// 1. The service is newly registered or exported and yielded a transient empty update.
@ -214,7 +210,7 @@ func (s *Service) handleUpsert(
return fmt.Errorf("failed to unmarshal resource: %w", err)
}
return s.handleUpsertService(peerName, partition, sn, csn)
return s.handleUpdateService(peerName, partition, sn, csn)
case pbpeering.TypeURLRoots:
roots := &pbpeering.PeeringTrustBundle{}
@ -229,24 +225,29 @@ func (s *Service) handleUpsert(
}
}
func (s *Service) handleUpsertService(
// handleUpdateService handles both deletion and upsert events for a service.
// On an UPSERT event:
// - All nodes, services, checks in the input pbNodes are re-applied through Raft.
// - Any nodes, services, or checks in the catalog that were not in the input pbNodes get deleted.
//
// On a DELETE event:
// - A reconciliation against nil or empty input pbNodes leads to deleting all stored catalog resources
// associated with the service name.
func (s *Service) handleUpdateService(
peerName string,
partition string,
sn structs.ServiceName,
csn *pbservice.IndexedCheckServiceNodes,
pbNodes *pbservice.IndexedCheckServiceNodes,
) error {
if csn == nil || len(csn.Nodes) == 0 {
return s.handleDeleteService(peerName, partition, sn)
// Capture instances in the state store for reconciliation later.
_, storedInstances, err := s.Backend.Store().CheckServiceNodes(nil, sn.Name, &sn.EnterpriseMeta, peerName)
if err != nil {
return fmt.Errorf("failed to read imported services: %w", err)
}
// Convert exported data into structs format.
structsNodes := make([]structs.CheckServiceNode, 0, len(csn.Nodes))
for _, pb := range csn.Nodes {
instance, err := pbservice.CheckServiceNodeToStructs(pb)
if err != nil {
return fmt.Errorf("failed to convert instance: %w", err)
}
structsNodes = append(structsNodes, *instance)
structsNodes, err := pbNodes.CheckServiceNodesToStruct()
if err != nil {
return fmt.Errorf("failed to convert protobuf instances to structs: %w", err)
}
// Normalize the data into a convenient form for operation.
@ -282,8 +283,145 @@ func (s *Service) handleUpsertService(
}
}
// TODO(peering): cleanup and deregister existing data that is now missing safely somehow
//
// Now that the data received has been stored in the state store, the rest of this
// function is responsible for cleaning up data in the catalog that wasn't in the snapshot.
//
// nodeCheckTuple uniquely identifies a node check in the catalog.
// The partition is not needed because we are only operating on one partition's catalog.
type nodeCheckTuple struct {
checkID types.CheckID
node string
}
var (
// unusedNodes tracks node names that were not present in the latest response.
// Missing nodes are not assumed to be deleted because there may be other service names
// registered on them.
// These nodes are only deregistered later if no other services remain on them.
unusedNodes = make(map[string]struct{})
// deletedNodeChecks tracks node checks that were not present in the latest response.
// A single node check will be attached to all service instances of a node, so this
// deduplication prevents issuing multiple deregistrations for a single check.
deletedNodeChecks = make(map[nodeCheckTuple]struct{})
)
for _, csn := range storedInstances {
if _, ok := snap.Nodes[csn.Node.ID]; !ok {
unusedNodes[string(csn.Node.ID)] = struct{}{}
// Since the node is not in the snapshot we can know the associated service
// instance is not in the snapshot either, since a service instance can't
// exist without a node.
// This will also delete all service checks.
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
Node: csn.Node.Node,
ServiceID: csn.Service.ID,
EnterpriseMeta: csn.Service.EnterpriseMeta,
PeerName: peerName,
})
if err != nil {
return fmt.Errorf("failed to deregister service %q: %w", csn.Service.CompoundServiceID(), err)
}
// We can't know if a node check was deleted from the exporting cluster
// (but not the node itself) if the node wasn't in the snapshot,
// so we do not loop over checks here.
// If the unusedNode gets deleted below that will also delete node checks.
continue
}
// Delete the service instance if not in the snapshot.
sid := csn.Service.CompoundServiceID()
if _, ok := snap.Nodes[csn.Node.ID].Services[sid]; !ok {
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
Node: csn.Node.Node,
ServiceID: csn.Service.ID,
EnterpriseMeta: csn.Service.EnterpriseMeta,
PeerName: peerName,
})
if err != nil {
ident := fmt.Sprintf("partition:%s/peer:%s/node:%s/ns:%s/service_id:%s",
csn.Service.PartitionOrDefault(), peerName, csn.Node.Node, csn.Service.NamespaceOrDefault(), csn.Service.ID)
return fmt.Errorf("failed to deregister service %q: %w", ident, err)
}
// When a service is deleted all associated checks also get deleted as a side effect.
continue
}
// Reconcile checks.
for _, chk := range csn.Checks {
if _, ok := snap.Nodes[csn.Node.ID].Services[sid].Checks[chk.CheckID]; !ok {
// Checks without a ServiceID are node checks.
// If the node exists but the check does not then the check was deleted.
if chk.ServiceID == "" {
// Deduplicate node checks to avoid deregistering a check multiple times.
tuple := nodeCheckTuple{
checkID: chk.CheckID,
node: chk.Node,
}
deletedNodeChecks[tuple] = struct{}{}
continue
}
// If the check isn't a node check then it's a service check.
// Service checks that were not present can be deleted immediately because
// checks for a given service ID will only be attached to a single CheckServiceNode.
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
Node: chk.Node,
CheckID: chk.CheckID,
EnterpriseMeta: chk.EnterpriseMeta,
PeerName: peerName,
})
if err != nil {
ident := fmt.Sprintf("partition:%s/peer:%s/node:%s/ns:%s/check_id:%s",
chk.PartitionOrDefault(), peerName, chk.Node, chk.NamespaceOrDefault(), chk.CheckID)
return fmt.Errorf("failed to deregister check %q: %w", ident, err)
}
}
}
}
// Delete all deduplicated node checks.
for chk := range deletedNodeChecks {
nodeMeta := structs.NodeEnterpriseMetaInPartition(sn.PartitionOrDefault())
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
Node: chk.node,
CheckID: chk.checkID,
EnterpriseMeta: *nodeMeta,
PeerName: peerName,
})
if err != nil {
ident := fmt.Sprintf("partition:%s/peer:%s/node:%s/check_id:%s", nodeMeta.PartitionOrDefault(), peerName, chk.node, chk.checkID)
return fmt.Errorf("failed to deregister node check %q: %w", ident, err)
}
}
// Delete any nodes that do not have any other services registered on them.
for node := range unusedNodes {
nodeMeta := structs.NodeEnterpriseMetaInPartition(sn.PartitionOrDefault())
_, ns, err := s.Backend.Store().NodeServices(nil, node, nodeMeta, peerName)
if err != nil {
return fmt.Errorf("failed to query services on node: %w", err)
}
if ns != nil && len(ns.Services) >= 1 {
// At least one service is still registered on this node, so we keep it.
continue
}
// All services on the node were deleted, so the node is also cleaned up.
err = s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
Node: node,
PeerName: peerName,
EnterpriseMeta: *nodeMeta,
})
if err != nil {
ident := fmt.Sprintf("partition:%s/peer:%s/node:%s", nodeMeta.PartitionOrDefault(), peerName, node)
return fmt.Errorf("failed to deregister node %q: %w", ident, err)
}
}
return nil
}
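handleUpdateService above reconciles the stored catalog against the incoming snapshot and deduplicates node checks with a struct-keyed set, since one node check is attached to every service instance on that node. A standalone sketch of that deduplication idea (the check and node names here are illustrative):

```go
package main

import "fmt"

type nodeCheckTuple struct {
	node    string
	checkID string
}

func main() {
	seen := map[nodeCheckTuple]struct{}{}
	stale := []nodeCheckTuple{
		{"node-a", "serf-health"},
		{"node-a", "serf-health"}, // same node check seen via a second service instance
		{"node-b", "serf-health"},
	}
	for _, t := range stale {
		seen[t] = struct{}{}
	}
	// Each (node, check) pair is deregistered exactly once.
	for t := range seen {
		fmt.Printf("deregister %s on %s\n", t.checkID, t.node)
	}
}
```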
@ -312,25 +450,13 @@ func (s *Service) handleDelete(
case pbpeering.TypeURLService:
sn := structs.ServiceNameFromString(resourceID)
sn.OverridePartition(partition)
return s.handleDeleteService(peerName, partition, sn)
return s.handleUpdateService(peerName, partition, sn, nil)
default:
return fmt.Errorf("unexpected resourceURL: %s", resourceURL)
}
}
func (s *Service) handleDeleteService(
peerName string,
partition string,
sn structs.ServiceName,
) error {
// Deregister: ServiceID == DeleteService ANd checks
// Deregister: ServiceID(empty) CheckID(empty) == DeleteNode
// TODO(peering): implement
return nil
}
func makeReply(resourceURL, nonce string, errCode code.Code, errMsg string) *pbpeering.ReplicationMessage {
var rpcErr *pbstatus.Status
if errCode != code.Code_OK || errMsg != "" {

View file

@ -34,7 +34,7 @@ var (
errPeeringTokenEmptyPeerID = errors.New("peering token peer ID value is empty")
)
// errPeeringInvalidServerAddress is returned when an initiate request contains
// errPeeringInvalidServerAddress is returned when an establish request contains
// an invalid server address.
type errPeeringInvalidServerAddress struct {
addr string
@ -48,8 +48,6 @@ func (e *errPeeringInvalidServerAddress) Error() string {
type Config struct {
Datacenter string
ConnectEnabled bool
// TODO(peering): remove this when we're ready
DisableMeshGatewayMode bool
}
// Service implements pbpeering.PeeringService to provide RPC operations for
@ -62,7 +60,6 @@ type Service struct {
}
func NewService(logger hclog.Logger, cfg Config, backend Backend) *Service {
cfg.DisableMeshGatewayMode = true
return &Service{
Backend: backend,
logger: logger,
@ -133,6 +130,8 @@ type Store interface {
PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error)
ExportedServicesForPeer(ws memdb.WatchSet, peerID string) (uint64, *structs.ExportedServiceList, error)
ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error)
CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error)
NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServices, error)
CAConfig(ws memdb.WatchSet) (uint64, *structs.CAConfiguration, error)
TrustBundleListByService(ws memdb.WatchSet, service string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
AbandonCh() <-chan struct{}
@ -141,10 +140,10 @@ type Store interface {
// Apply provides a write-only interface for persisting Peering data.
type Apply interface {
PeeringWrite(req *pbpeering.PeeringWriteRequest) error
PeeringDelete(req *pbpeering.PeeringDeleteRequest) error
PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
CatalogRegister(req *structs.RegisterRequest) error
CatalogDeregister(req *structs.DeregisterRequest) error
}
// GenerateToken implements the PeeringService RPC method to generate a
@ -229,13 +228,13 @@ func (s *Service) GenerateToken(
return resp, err
}
// Initiate implements the PeeringService RPC method to finalize peering
// Establish implements the PeeringService RPC method to finalize peering
// registration. Given a valid token output from a peer's GenerateToken endpoint,
// a peering is registered.
func (s *Service) Initiate(
func (s *Service) Establish(
ctx context.Context,
req *pbpeering.InitiateRequest,
) (*pbpeering.InitiateResponse, error) {
req *pbpeering.EstablishRequest,
) (*pbpeering.EstablishResponse, error) {
// validate prior to forwarding to the leader, this saves a network hop
if err := dns.ValidateLabel(req.PeerName); err != nil {
return nil, fmt.Errorf("%s is not a valid peer name: %w", req.PeerName, err)
@ -252,17 +251,17 @@ func (s *Service) Initiate(
return nil, fmt.Errorf("meta tags failed validation: %w", err)
}
resp := &pbpeering.InitiateResponse{}
resp := &pbpeering.EstablishResponse{}
handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error {
var err error
resp, err = pbpeering.NewPeeringServiceClient(conn).Initiate(ctx, req)
resp, err = pbpeering.NewPeeringServiceClient(conn).Establish(ctx, req)
return err
})
if handled || err != nil {
return resp, err
}
defer metrics.MeasureSince([]string{"peering", "initiate"}, time.Now())
defer metrics.MeasureSince([]string{"peering", "establish"}, time.Now())
// convert ServiceAddress values to strings
serverAddrs := make([]string, len(tok.ServerAddresses))
@ -395,7 +394,35 @@ func (s *Service) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelet
// TODO(peering): ACL check request token
// TODO(peering): handle blocking queries
err = s.Backend.Apply().PeeringDelete(req)
q := state.Query{
Value: strings.ToLower(req.Name),
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition),
}
_, existing, err := s.Backend.Store().PeeringRead(nil, q)
if err != nil {
return nil, err
}
if existing == nil || !existing.IsActive() {
// Return early when the Peering doesn't exist or is already marked for deletion.
// We don't return nil because the pb will fail to marshal.
return &pbpeering.PeeringDeleteResponse{}, nil
}
// We are using a write request due to needing to perform a deferred deletion.
// The peering gets marked for deletion by setting the DeletedAt field,
// and a leader routine will handle deleting the peering.
writeReq := &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
// We only need to include the name and partition for the peering to be identified.
// All other data associated with the peering can be discarded because once marked
// for deletion the peering is effectively gone.
Name: req.Name,
Partition: req.Partition,
DeletedAt: structs.TimeToProto(time.Now().UTC()),
},
}
err = s.Backend.Apply().PeeringWrite(writeReq)
if err != nil {
return nil, err
}
@ -529,17 +556,24 @@ func (s *Service) StreamResources(stream pbpeering.PeeringService_StreamResource
// TODO(peering): If the peering is marked as deleted, send a Terminated message and return
// TODO(peering): Store subscription request so that an event publisher can separately handle pushing messages for it
s.logger.Info("accepted initial replication request from peer", "peer_id", req.PeerID)
s.logger.Info("accepted initial replication request from peer", "peer_id", p.ID)
// For server peers both of these ID values are the same, because we generated a token with a local ID,
// and the client peer dials using that same ID.
return s.HandleStream(HandleStreamRequest{
streamReq := HandleStreamRequest{
LocalID: p.ID,
RemoteID: p.PeerID,
PeerName: p.Name,
Partition: p.Partition,
Stream: stream,
})
}
err = s.HandleStream(streamReq)
// A nil error indicates that the peering was deleted and the stream needs to be gracefully shutdown.
if err == nil {
s.DrainStream(streamReq)
return nil
}
s.logger.Error("error handling stream", "peer_name", p.Name, "peer_id", req.PeerID, "error", err)
return err
}
type HandleStreamRequest struct {
@ -559,10 +593,28 @@ type HandleStreamRequest struct {
Stream BidirectionalStream
}
// DrainStream attempts to gracefully drain the stream when the connection is going to be torn down.
// Tearing down the connection too quickly can lead to our peer receiving a context cancellation error before the stream termination message.
// Handling the termination message is important to set the expectation that the peering will not be reestablished unless recreated.
func (s *Service) DrainStream(req HandleStreamRequest) {
for {
// Ensure that we read until an error, or the peer has nothing more to send.
if _, err := req.Stream.Recv(); err != nil {
if err != io.EOF {
s.logger.Warn("failed to tear down stream gracefully: peer may not have received termination message",
"peer_name", req.PeerName, "peer_id", req.LocalID, "error", err)
}
break
}
// Since the peering is being torn down we discard all replication messages without an error.
// We want to avoid importing new data at this point.
}
}
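DrainStream above keeps reading until the peer closes its side, so the termination message can be observed before the connection is torn down. A minimal sketch of the same drain pattern against a stand-in stream interface (the fake stream below is illustrative, not the real pbpeering stream type):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

type recvStream interface {
	Recv() (interface{}, error)
}

// drain reads until EOF or error, discarding messages so no new data is imported.
func drain(s recvStream) {
	for {
		if _, err := s.Recv(); err != nil {
			if !errors.Is(err, io.EOF) {
				fmt.Println("drain ended with error:", err)
			}
			return
		}
	}
}

// fakeStream returns EOF after n messages, for demonstration only.
type fakeStream struct{ n int }

func (f *fakeStream) Recv() (interface{}, error) {
	if f.n == 0 {
		return nil, io.EOF
	}
	f.n--
	return struct{}{}, nil
}

func main() {
	drain(&fakeStream{n: 3})
	fmt.Println("stream drained")
}
```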
// The localID provided is the locally-generated identifier for the peering.
// The remoteID is an identifier that the remote peer recognizes for the peering.
func (s *Service) HandleStream(req HandleStreamRequest) error {
logger := s.logger.Named("stream").With("peer_id", req.LocalID)
logger := s.logger.Named("stream").With("peer_name", req.PeerName, "peer_id", req.LocalID)
logger.Trace("handling stream for peer")
status, err := s.streams.connected(req.LocalID)
@ -619,25 +671,20 @@ func (s *Service) HandleStream(req HandleStreamRequest) error {
defer close(recvChan)
for {
msg, err := req.Stream.Recv()
if err == nil {
logTraceRecv(logger, msg)
recvChan <- msg
continue
}
if err == io.EOF {
logger.Info("stream ended by peer")
status.trackReceiveError(err.Error())
return
}
if e, ok := grpcstatus.FromError(err); ok {
// Cancelling the stream is not an error; it means we or our peer intended to terminate the peering.
if e.Code() == codes.Canceled {
return
}
}
if err != nil {
logger.Error("failed to receive from stream", "error", err)
status.trackReceiveError(err.Error())
return
}
logTraceRecv(logger, msg)
recvChan <- msg
logger.Error("failed to receive from stream", "error", err)
status.trackReceiveError(err.Error())
return
}
}()
@ -666,13 +713,12 @@ func (s *Service) HandleStream(req HandleStreamRequest) error {
case msg, open := <-recvChan:
if !open {
// No longer receiving data on the stream.
logger.Trace("no longer receiving data on the stream")
return nil
}
if !s.Backend.IsLeader() {
// we are not the leader anymore so we will hang up on the dialer
logger.Error("node is not a leader anymore; cannot continue streaming")
st, err := grpcstatus.New(codes.FailedPrecondition,
@ -723,11 +769,11 @@ func (s *Service) HandleStream(req HandleStreamRequest) error {
}
if term := msg.GetTerminated(); term != nil {
logger.Info("received peering termination message, cleaning up imported resources")
logger.Info("peering was deleted by our peer: marking peering as terminated and cleaning up imported resources")
// Once marked as terminated, a separate deferred deletion routine will clean up imported resources.
if err := s.Backend.Apply().PeeringTerminateByID(&pbpeering.PeeringTerminateByIDRequest{ID: req.LocalID}); err != nil {
return err
logger.Error("failed to mark peering as terminated: %w", err)
}
return nil
}
@ -735,8 +781,7 @@ func (s *Service) HandleStream(req HandleStreamRequest) error {
case update := <-subCh:
var resp *pbpeering.ReplicationMessage
switch {
case strings.HasPrefix(update.CorrelationID, subExportedService),
strings.HasPrefix(update.CorrelationID, subExportedProxyService):
case strings.HasPrefix(update.CorrelationID, subExportedService):
resp = makeServiceResponse(logger, update)
case strings.HasPrefix(update.CorrelationID, subMeshGateway):

View file

@ -18,24 +18,24 @@ import (
gogrpc "google.golang.org/grpc"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/consul/state"
grpc "github.com/hashicorp/consul/agent/grpc/private"
"github.com/hashicorp/consul/agent/grpc/private/resolver"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbservice"
"github.com/hashicorp/consul/proto/prototest"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/rpc/middleware"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbservice"
"github.com/hashicorp/consul/proto/prototest"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
@ -112,7 +112,7 @@ func TestPeeringService_GenerateToken(t *testing.T) {
require.Equal(t, expect, peers[0])
}
func TestPeeringService_Initiate(t *testing.T) {
func TestPeeringService_Establish(t *testing.T) {
validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160")
validTokenJSON, _ := json.Marshal(&validToken)
validTokenB64 := base64.StdEncoding.EncodeToString(validTokenJSON)
@ -123,8 +123,8 @@ func TestPeeringService_Initiate(t *testing.T) {
type testcase struct {
name string
req *pbpeering.InitiateRequest
expectResp *pbpeering.InitiateResponse
req *pbpeering.EstablishRequest
expectResp *pbpeering.EstablishResponse
expectPeering *pbpeering.Peering
expectErr string
}
@ -132,7 +132,7 @@ func TestPeeringService_Initiate(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
resp, err := client.Initiate(ctx, tc.req)
resp, err := client.Establish(ctx, tc.req)
if tc.expectErr != "" {
require.Contains(t, err.Error(), tc.expectErr)
return
@ -160,12 +160,12 @@ func TestPeeringService_Initiate(t *testing.T) {
tcs := []testcase{
{
name: "invalid peer name",
req: &pbpeering.InitiateRequest{PeerName: "--AA--"},
req: &pbpeering.EstablishRequest{PeerName: "--AA--"},
expectErr: "--AA-- is not a valid peer name",
},
{
name: "invalid token (base64)",
req: &pbpeering.InitiateRequest{
req: &pbpeering.EstablishRequest{
PeerName: "peer1-usw1",
PeeringToken: "+++/+++",
},
@ -173,7 +173,7 @@ func TestPeeringService_Initiate(t *testing.T) {
},
{
name: "invalid token (JSON)",
req: &pbpeering.InitiateRequest{
req: &pbpeering.EstablishRequest{
PeerName: "peer1-usw1",
PeeringToken: "Cg==", // base64 of "-"
},
@ -181,7 +181,7 @@ func TestPeeringService_Initiate(t *testing.T) {
},
{
name: "invalid token (empty)",
req: &pbpeering.InitiateRequest{
req: &pbpeering.EstablishRequest{
PeerName: "peer1-usw1",
PeeringToken: "e30K", // base64 of "{}"
},
@ -189,7 +189,7 @@ func TestPeeringService_Initiate(t *testing.T) {
},
{
name: "too many meta tags",
req: &pbpeering.InitiateRequest{
req: &pbpeering.EstablishRequest{
PeerName: "peer1-usw1",
PeeringToken: validTokenB64,
Meta: generateTooManyMetaKeys(),
@ -198,12 +198,12 @@ func TestPeeringService_Initiate(t *testing.T) {
},
{
name: "success",
req: &pbpeering.InitiateRequest{
req: &pbpeering.EstablishRequest{
PeerName: "peer1-usw1",
PeeringToken: validTokenB64,
Meta: map[string]string{"foo": "bar"},
},
expectResp: &pbpeering.InitiateResponse{},
expectResp: &pbpeering.EstablishResponse{},
expectPeering: peering.TestPeering(
"peer1-usw1",
pbpeering.PeeringState_INITIAL,
@ -217,6 +217,7 @@ func TestPeeringService_Initiate(t *testing.T) {
})
}
}
func TestPeeringService_Read(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s := newTestServer(t, nil)
@ -273,6 +274,40 @@ func TestPeeringService_Read(t *testing.T) {
}
}
func TestPeeringService_Delete(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s := newTestServer(t, nil)
p := &pbpeering.Peering{
Name: "foo",
State: pbpeering.PeeringState_INITIAL,
PeerCAPems: nil,
PeerServerName: "test",
PeerServerAddresses: []string{"addr1"},
}
err := s.Server.FSM().State().PeeringWrite(10, p)
require.NoError(t, err)
require.Nil(t, p.DeletedAt)
require.True(t, p.IsActive())
client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
require.NoError(r, err)
// Initially the peering will be marked for deletion but eventually the leader
// routine will clean it up.
require.Nil(r, resp)
})
}
func TestPeeringService_List(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s := newTestServer(t, nil)

File diff suppressed because it is too large

View file

@ -75,13 +75,10 @@ func (m *subscriptionManager) subscribe(ctx context.Context, peerID, peerName, p
// Wrap our bare state store queries in goroutines that emit events.
go m.notifyExportedServicesForPeerID(ctx, state, peerID)
if !m.config.DisableMeshGatewayMode && m.config.ConnectEnabled {
go m.notifyMeshGatewaysForPartition(ctx, state, state.partition)
}
// If connect is enabled, watch for updates to CA roots.
if m.config.ConnectEnabled {
go m.notifyRootCAUpdates(ctx, state.updateCh)
go m.notifyMeshGatewaysForPartition(ctx, state, state.partition)
// If connect is enabled, watch for updates to CA roots.
go m.notifyRootCAUpdatesForPartition(ctx, state.updateCh, state.partition)
}
// This goroutine is the only one allowed to manipulate protected
@ -129,12 +126,8 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
pending := &pendingPayload{}
m.syncNormalServices(ctx, state, pending, evt.Services)
if m.config.DisableMeshGatewayMode {
m.syncProxyServices(ctx, state, pending, evt.Services)
} else {
if m.config.ConnectEnabled {
m.syncDiscoveryChains(ctx, state, pending, evt.ListAllDiscoveryChains())
}
if m.config.ConnectEnabled {
m.syncDiscoveryChains(ctx, state, pending, evt.ListAllDiscoveryChains())
}
state.sendPendingEvents(ctx, m.logger, pending)
@ -152,32 +145,25 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
// Clear this raft index before exporting.
csn.Index = 0
if !m.config.DisableMeshGatewayMode {
// Ensure that connect things are scrubbed so we don't mix-and-match
// with the synthetic entries that point to mesh gateways.
filterConnectReferences(csn)
// Ensure that connect things are scrubbed so we don't mix-and-match
// with the synthetic entries that point to mesh gateways.
filterConnectReferences(csn)
// Flatten health checks
for _, instance := range csn.Nodes {
instance.Checks = flattenChecks(
instance.Node.Node,
instance.Service.ID,
instance.Service.Service,
instance.Service.EnterpriseMeta,
instance.Checks,
)
}
// Flatten health checks
for _, instance := range csn.Nodes {
instance.Checks = flattenChecks(
instance.Node.Node,
instance.Service.ID,
instance.Service.Service,
instance.Service.EnterpriseMeta,
instance.Checks,
)
}
// Scrub raft indexes
for _, instance := range csn.Nodes {
instance.Node.RaftIndex = nil
instance.Service.RaftIndex = nil
if m.config.DisableMeshGatewayMode {
for _, chk := range instance.Checks {
chk.RaftIndex = nil
}
}
// skip checks since we just generated one from scratch
}
@ -197,61 +183,6 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
}
state.sendPendingEvents(ctx, m.logger, pending)
case strings.HasPrefix(u.CorrelationID, subExportedProxyService):
csn, ok := u.Result.(*pbservice.IndexedCheckServiceNodes)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
if !m.config.DisableMeshGatewayMode {
return nil // ignore event
}
sn := structs.ServiceNameFromString(strings.TrimPrefix(u.CorrelationID, subExportedProxyService))
spiffeID := connect.SpiffeIDService{
Host: m.trustDomain,
Partition: sn.PartitionOrDefault(),
Namespace: sn.NamespaceOrDefault(),
Datacenter: m.config.Datacenter,
Service: sn.Name,
}
sni := connect.PeeredServiceSNI(
sn.Name,
sn.NamespaceOrDefault(),
sn.PartitionOrDefault(),
state.peerName,
m.trustDomain,
)
peerMeta := &pbservice.PeeringServiceMeta{
SNI: []string{sni},
SpiffeID: []string{spiffeID.URI().String()},
Protocol: "tcp",
}
// skip checks since we just generated one from scratch
// Set peerMeta on all instances and scrub the raft indexes.
for _, instance := range csn.Nodes {
instance.Service.Connect.PeerMeta = peerMeta
instance.Node.RaftIndex = nil
instance.Service.RaftIndex = nil
if m.config.DisableMeshGatewayMode {
for _, chk := range instance.Checks {
chk.RaftIndex = nil
}
}
}
csn.Index = 0
id := proxyServicePayloadIDPrefix + strings.TrimPrefix(u.CorrelationID, subExportedProxyService)
// Just ferry this one directly along to the destination.
pending := &pendingPayload{}
if err := pending.Add(id, u.CorrelationID, csn); err != nil {
return err
}
state.sendPendingEvents(ctx, m.logger, pending)
case strings.HasPrefix(u.CorrelationID, subMeshGateway):
csn, ok := u.Result.(*pbservice.IndexedCheckServiceNodes)
if !ok {
@ -260,7 +191,7 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
partition := strings.TrimPrefix(u.CorrelationID, subMeshGateway)
if m.config.DisableMeshGatewayMode || !m.config.ConnectEnabled {
if !m.config.ConnectEnabled {
return nil // ignore event
}
@ -360,14 +291,18 @@ func filterConnectReferences(orig *pbservice.IndexedCheckServiceNodes) {
orig.Nodes = newNodes
}
func (m *subscriptionManager) notifyRootCAUpdates(ctx context.Context, updateCh chan<- cache.UpdateEvent) {
func (m *subscriptionManager) notifyRootCAUpdatesForPartition(
ctx context.Context,
updateCh chan<- cache.UpdateEvent,
partition string,
) {
var idx uint64
// TODO(peering): retry logic; fail past a threshold
for {
var err error
// Typically, this function will block inside `m.subscribeCARoots` and only return on error.
// Errors are logged and the watch is retried.
idx, err = m.subscribeCARoots(ctx, idx, updateCh)
idx, err = m.subscribeCARoots(ctx, idx, updateCh, partition)
if errors.Is(err, stream.ErrSubForceClosed) {
m.logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt resume")
} else if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
@ -386,7 +321,12 @@ func (m *subscriptionManager) notifyRootCAUpdates(ctx context.Context, updateCh
// subscribeCARoots subscribes to state.EventTopicCARoots for changes to CA roots.
// Upon receiving an event it will send the payload in updateCh.
func (m *subscriptionManager) subscribeCARoots(ctx context.Context, idx uint64, updateCh chan<- cache.UpdateEvent) (uint64, error) {
func (m *subscriptionManager) subscribeCARoots(
ctx context.Context,
idx uint64,
updateCh chan<- cache.UpdateEvent,
partition string,
) (uint64, error) {
// following code adapted from connectca/watch_roots.go
sub, err := m.backend.Subscribe(&stream.SubscribeRequest{
Topic: state.EventTopicCARoots,
@ -451,8 +391,10 @@ func (m *subscriptionManager) subscribeCARoots(ctx context.Context, idx uint64,
updateCh <- cache.UpdateEvent{
CorrelationID: subCARoot,
Result: &pbpeering.PeeringTrustBundle{
TrustDomain: m.trustDomain,
RootPEMs: rootPems,
TrustDomain: m.trustDomain,
RootPEMs: rootPems,
ExportedPartition: partition,
// TODO(peering): revisit decision not to validate datacenter in RBAC
},
}
}
@ -510,57 +452,6 @@ func (m *subscriptionManager) syncNormalServices(
}
}
// TODO(peering): remove
func (m *subscriptionManager) syncProxyServices(
ctx context.Context,
state *subscriptionState,
pending *pendingPayload,
services []structs.ServiceName,
) {
// seen contains the set of exported service names and is used to reconcile the list of watched services.
seen := make(map[structs.ServiceName]struct{})
// Ensure there is a subscription for each service exported to the peer.
for _, svc := range services {
seen[svc] = struct{}{}
if _, ok := state.watchedProxyServices[svc]; ok {
// Exported service is already being watched, nothing to do.
continue
}
notifyCtx, cancel := context.WithCancel(ctx)
if err := m.NotifyConnectProxyService(notifyCtx, svc, state.updateCh); err != nil {
cancel()
m.logger.Error("failed to subscribe to proxy service", "service", svc.String())
continue
}
state.watchedProxyServices[svc] = cancel
}
// For every subscription without an exported service, call the associated cancel fn.
for svc, cancel := range state.watchedProxyServices {
if _, ok := seen[svc]; !ok {
cancel()
delete(state.watchedProxyServices, svc)
// Send an empty event to the stream handler to trigger sending a DELETE message.
// Cancelling the subscription context above is necessary, but does not yield a useful signal on its own.
err := pending.Add(
proxyServicePayloadIDPrefix+svc.String(),
subExportedProxyService+svc.String(),
&pbservice.IndexedCheckServiceNodes{},
)
if err != nil {
m.logger.Error("failed to send event for proxy service", "service", svc.String(), "error", err)
continue
}
}
}
}
func (m *subscriptionManager) syncDiscoveryChains(
ctx context.Context,
state *subscriptionState,
@ -761,10 +652,9 @@ func flattenChecks(
}
const (
subExportedServiceList = "exported-service-list"
subExportedService = "exported-service:"
subExportedProxyService = "exported-proxy-service:"
subMeshGateway = "mesh-gateway:"
subExportedServiceList = "exported-service-list"
subExportedService = "exported-service:"
subMeshGateway = "mesh-gateway:"
)
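Subscription events are routed on these correlation-ID prefixes (the removed subExportedProxyService prefix no longer appears). A small sketch of that prefix-dispatch pattern using the remaining constants:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	subExportedService = "exported-service:"
	subMeshGateway     = "mesh-gateway:"
)

// dispatch routes an update by its correlation-ID prefix, mirroring the
// switch in handleEvent above.
func dispatch(correlationID string) string {
	switch {
	case strings.HasPrefix(correlationID, subExportedService):
		return "service update for " + strings.TrimPrefix(correlationID, subExportedService)
	case strings.HasPrefix(correlationID, subMeshGateway):
		return "mesh gateway update for partition " + strings.TrimPrefix(correlationID, subMeshGateway)
	default:
		return "unknown correlation ID"
	}
}

func main() {
	fmt.Println(dispatch("exported-service:mysql"))
	fmt.Println(dispatch("mesh-gateway:default"))
}
```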
// NotifyStandardService will notify the given channel when there are updates
@ -777,14 +667,6 @@ func (m *subscriptionManager) NotifyStandardService(
sr := newExportedStandardServiceRequest(m.logger, svc, m.backend)
return m.viewStore.Notify(ctx, sr, subExportedService+svc.String(), updateCh)
}
func (m *subscriptionManager) NotifyConnectProxyService(
ctx context.Context,
svc structs.ServiceName,
updateCh chan<- cache.UpdateEvent,
) error {
sr := newExportedConnectProxyServiceRequest(m.logger, svc, m.backend)
return m.viewStore.Notify(ctx, sr, subExportedProxyService+svc.String(), updateCh)
}
// syntheticProxyNameSuffix is the suffix to add to synthetic proxies we
// replicate to route traffic to an exported discovery chain through the mesh

View file

@ -7,7 +7,6 @@ import (
"time"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
@ -23,12 +22,6 @@ import (
)
func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
testSubscriptionManager_RegisterDeregister(t, true)
}
func TestSubscriptionManager_RegisterDeregister_EnableMeshGateways(t *testing.T) {
testSubscriptionManager_RegisterDeregister(t, false)
}
func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateways bool) {
backend := newTestSubscriptionBackend(t)
// initialCatalogIdx := backend.lastIdx
@ -40,9 +33,8 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty()
mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{
Datacenter: "dc1",
ConnectEnabled: true,
DisableMeshGatewayMode: disableMeshGateways,
Datacenter: "dc1",
ConnectEnabled: true,
}, connect.TestTrustDomain, backend)
subCh := mgr.subscribe(ctx, id, "my-peering", partition)
@ -52,18 +44,12 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
mysqlCorrID = subExportedService + structs.NewServiceName("mysql", nil).String()
mysqlProxyCorrID = subExportedService + structs.NewServiceName("mysql-sidecar-proxy", nil).String()
mysqlProxyCorrID_temp = subExportedProxyService + structs.NewServiceName("mysql", nil).String()
)
if disableMeshGateways {
expectEvents(t, subCh)
} else {
// Expect just the empty mesh gateway event to replicate.
expectEvents(t, subCh, func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, gatewayCorrID, 0)
})
}
// Expect just the empty mesh gateway event to replicate.
expectEvents(t, subCh, func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, gatewayCorrID, 0)
})
testutil.RunStep(t, "initial export syncs empty instance lists", func(t *testing.T) {
backend.ensureConfigEntry(t, &structs.ExportedServicesConfigEntry{
@ -84,25 +70,14 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
},
})
if disableMeshGateways {
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID_temp, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 0)
},
)
} else {
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID, 0)
},
)
}
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID, 0)
},
)
})
mysql1 := &structs.CheckServiceNode{
@ -125,17 +100,10 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
require.Len(t, res.Nodes, 1)
if disableMeshGateways {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService_temp("", "mysql-1", "mysql", 5000, nil),
}, res.Nodes[0])
} else {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
}, res.Nodes[0])
}
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
}, res.Nodes[0])
})
backend.ensureCheck(t, mysql1.Checks[0])
@ -148,23 +116,13 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
require.Len(t, res.Nodes, 1)
if disableMeshGateways {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService_temp("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck_temp("foo", "mysql-1", "mysql", "mysql-check", "critical", nil),
},
}, res.Nodes[0])
} else {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("foo", "mysql-1", "mysql", "critical", nil),
},
}, res.Nodes[0])
}
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("foo", "mysql-1", "mysql", "critical", nil),
},
}, res.Nodes[0])
})
})
@ -188,31 +146,17 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
require.Len(t, res.Nodes, 2)
if disableMeshGateways {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService_temp("", "mysql-2", "mysql", 5000, nil),
}, res.Nodes[0])
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService_temp("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck_temp("foo", "mysql-1", "mysql", "mysql-check", "critical", nil),
},
}, res.Nodes[1])
} else {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService("", "mysql-2", "mysql", 5000, nil),
}, res.Nodes[0])
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("foo", "mysql-1", "mysql", "critical", nil),
},
}, res.Nodes[1])
}
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService("", "mysql-2", "mysql", 5000, nil),
}, res.Nodes[0])
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("foo", "mysql-1", "mysql", "critical", nil),
},
}, res.Nodes[1])
})
backend.ensureCheck(t, mysql2.Checks[0])
@ -224,37 +168,20 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
require.Equal(t, uint64(0), res.Index)
require.Len(t, res.Nodes, 2)
if disableMeshGateways {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService_temp("", "mysql-2", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck_temp("bar", "mysql-2", "mysql", "mysql-2-check", "critical", nil),
},
}, res.Nodes[0])
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService_temp("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck_temp("foo", "mysql-1", "mysql", "mysql-check", "critical", nil),
},
}, res.Nodes[1])
} else {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService("", "mysql-2", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("bar", "mysql-2", "mysql", "critical", nil),
},
}, res.Nodes[0])
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("foo", "mysql-1", "mysql", "critical", nil),
},
}, res.Nodes[1])
}
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService("", "mysql-2", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("bar", "mysql-2", "mysql", "critical", nil),
},
}, res.Nodes[0])
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("foo", "10.0.0.1", partition),
Service: pbService("", "mysql-1", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("foo", "mysql-1", "mysql", "critical", nil),
},
}, res.Nodes[1])
})
})
@ -284,31 +211,17 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
require.Equal(t, uint64(0), res.Index)
require.Len(t, res.Nodes, 1)
if disableMeshGateways {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService_temp("", "mysql-2", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck_temp("bar", "mysql-2", "mysql", "mysql-2-check", "critical", nil),
},
}, res.Nodes[0])
} else {
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService("", "mysql-2", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("bar", "mysql-2", "mysql", "critical", nil),
},
}, res.Nodes[0])
}
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("bar", "10.0.0.2", partition),
Service: pbService("", "mysql-2", "mysql", 5000, nil),
Checks: []*pbservice.HealthCheck{
pbCheck("bar", "mysql-2", "mysql", "critical", nil),
},
}, res.Nodes[0])
})
})
testutil.RunStep(t, "register mesh gateway to send proxy updates", func(t *testing.T) {
if disableMeshGateways {
t.Skip()
return
}
gateway := &structs.CheckServiceNode{
Node: &structs.Node{Node: "mgw", Address: "10.1.1.1"},
Service: &structs.NodeService{ID: "gateway-1", Kind: structs.ServiceKindMeshGateway, Service: "gateway", Port: 8443},
@ -381,10 +294,6 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
})
testutil.RunStep(t, "deregister mesh gateway to send proxy removals", func(t *testing.T) {
if disableMeshGateways {
t.Skip()
return
}
backend.deleteService(t, "mgw", "gateway-1")
expectEvents(t, subCh,
@ -407,12 +316,6 @@ func testSubscriptionManager_RegisterDeregister(t *testing.T, disableMeshGateway
}
func TestSubscriptionManager_InitialSnapshot(t *testing.T) {
testSubscriptionManager_InitialSnapshot(t, true)
}
func TestSubscriptionManager_InitialSnapshot_EnableMeshGateways(t *testing.T) {
testSubscriptionManager_InitialSnapshot(t, false)
}
func testSubscriptionManager_InitialSnapshot(t *testing.T, disableMeshGateways bool) {
backend := newTestSubscriptionBackend(t)
// initialCatalogIdx := backend.lastIdx
@ -424,9 +327,8 @@ func testSubscriptionManager_InitialSnapshot(t *testing.T, disableMeshGateways b
partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty()
mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{
Datacenter: "dc1",
ConnectEnabled: true,
DisableMeshGatewayMode: disableMeshGateways,
Datacenter: "dc1",
ConnectEnabled: true,
}, connect.TestTrustDomain, backend)
subCh := mgr.subscribe(ctx, id, "my-peering", partition)
@ -455,20 +357,12 @@ func testSubscriptionManager_InitialSnapshot(t *testing.T, disableMeshGateways b
mysqlProxyCorrID = subExportedService + structs.NewServiceName("mysql-sidecar-proxy", nil).String()
mongoProxyCorrID = subExportedService + structs.NewServiceName("mongo-sidecar-proxy", nil).String()
chainProxyCorrID = subExportedService + structs.NewServiceName("chain-sidecar-proxy", nil).String()
mysqlProxyCorrID_temp = subExportedProxyService + structs.NewServiceName("mysql", nil).String()
mongoProxyCorrID_temp = subExportedProxyService + structs.NewServiceName("mongo", nil).String()
chainProxyCorrID_temp = subExportedProxyService + structs.NewServiceName("chain", nil).String()
)
if disableMeshGateways {
expectEvents(t, subCh)
} else {
// Expect just the empty mesh gateway event to replicate.
expectEvents(t, subCh, func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, gatewayCorrID, 0)
})
}
// Expect just the empty mesh gateway event to replicate.
expectEvents(t, subCh, func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, gatewayCorrID, 0)
})
// At this point in time we'll have a mesh-gateway notification with no
// content stored and handled.
@ -497,56 +391,29 @@ func testSubscriptionManager_InitialSnapshot(t *testing.T, disableMeshGateways b
},
})
if disableMeshGateways {
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainProxyCorrID_temp, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mongoProxyCorrID_temp, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID_temp, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mongoCorrID, 1, "mongo", string(structs.ServiceKindTypical))
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 1, "mysql", string(structs.ServiceKindTypical))
},
)
} else {
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainProxyCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mongoCorrID, 1, "mongo", string(structs.ServiceKindTypical))
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mongoProxyCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 1, "mysql", string(structs.ServiceKindTypical))
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID, 0)
},
)
}
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainProxyCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mongoCorrID, 1, "mongo", string(structs.ServiceKindTypical))
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mongoProxyCorrID, 0)
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 1, "mysql", string(structs.ServiceKindTypical))
},
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID, 0)
},
)
})
testutil.RunStep(t, "registering a mesh gateway triggers connect replies", func(t *testing.T) {
if disableMeshGateways {
t.Skip()
return
}
gateway := &structs.CheckServiceNode{
Node: &structs.Node{Node: "mgw", Address: "10.1.1.1"},
Service: &structs.NodeService{ID: "gateway-1", Kind: structs.ServiceKindMeshGateway, Service: "gateway", Port: 8443},
@ -850,29 +717,6 @@ func pbService(kind, id, name string, port int32, entMeta *pbcommon.EnterpriseMe
}
}
func pbService_temp(kind, id, name string, port int32, entMeta *pbcommon.EnterpriseMeta) *pbservice.NodeService {
if entMeta == nil {
entMeta = pbcommon.DefaultEnterpriseMeta
}
return &pbservice.NodeService{
ID: id,
Kind: kind,
Service: name,
Port: port,
Weights: &pbservice.Weights{
Passing: 1,
Warning: 1,
},
EnterpriseMeta: entMeta,
Connect: &pbservice.ServiceConnect{},
Proxy: &pbservice.ConnectProxyConfig{
MeshGateway: &pbservice.MeshGatewayConfig{},
Expose: &pbservice.ExposeConfig{},
TransparentProxy: &pbservice.TransparentProxyConfig{},
},
}
}
func pbCheck(node, svcID, svcName, status string, entMeta *pbcommon.EnterpriseMeta) *pbservice.HealthCheck {
if entMeta == nil {
entMeta = pbcommon.DefaultEnterpriseMeta
@ -887,23 +731,3 @@ func pbCheck(node, svcID, svcName, status string, entMeta *pbcommon.EnterpriseMe
EnterpriseMeta: entMeta,
}
}
func pbCheck_temp(node, svcID, svcName, checkID, status string, entMeta *pbcommon.EnterpriseMeta) *pbservice.HealthCheck {
if entMeta == nil {
entMeta = pbcommon.DefaultEnterpriseMeta
}
return &pbservice.HealthCheck{
Node: node,
CheckID: checkID,
Status: status,
ServiceID: svcID,
ServiceName: svcName,
EnterpriseMeta: entMeta,
Definition: &pbservice.HealthCheckDefinition{
DeregisterCriticalServiceAfter: durationpb.New(0),
Interval: durationpb.New(0),
TTL: durationpb.New(0),
Timeout: durationpb.New(0),
},
}
}

View file

@ -25,9 +25,8 @@ type subscriptionState struct {
// plain data
exportList *structs.ExportedServiceList
watchedServices map[structs.ServiceName]context.CancelFunc
watchedProxyServices map[structs.ServiceName]context.CancelFunc // TODO(peering): remove
connectServices map[structs.ServiceName]string // value:protocol
watchedServices map[structs.ServiceName]context.CancelFunc
connectServices map[structs.ServiceName]string // value:protocol
// eventVersions is a duplicate event suppression system keyed by the "id"
// not the "correlationID"
@ -46,12 +45,11 @@ type subscriptionState struct {
func newSubscriptionState(peerName, partition string) *subscriptionState {
return &subscriptionState{
peerName: peerName,
partition: partition,
watchedServices: make(map[structs.ServiceName]context.CancelFunc),
watchedProxyServices: make(map[structs.ServiceName]context.CancelFunc),
connectServices: make(map[structs.ServiceName]string),
eventVersions: make(map[string]string),
peerName: peerName,
partition: partition,
watchedServices: make(map[structs.ServiceName]context.CancelFunc),
connectServices: make(map[structs.ServiceName]string),
eventVersions: make(map[string]string),
}
}
@ -103,14 +101,6 @@ func (s *subscriptionState) cleanupEventVersions(logger hclog.Logger) {
keep = true
}
case strings.HasPrefix(id, proxyServicePayloadIDPrefix):
name := strings.TrimPrefix(id, proxyServicePayloadIDPrefix)
sn := structs.ServiceNameFromString(name)
if _, ok := s.watchedProxyServices[sn]; ok {
keep = true
}
case strings.HasPrefix(id, discoveryChainPayloadIDPrefix):
name := strings.TrimPrefix(id, discoveryChainPayloadIDPrefix)
sn := structs.ServiceNameFromString(name)
@ -142,7 +132,6 @@ const (
caRootsPayloadID = "roots"
meshGatewayPayloadID = "mesh-gateway"
servicePayloadIDPrefix = "service:"
proxyServicePayloadIDPrefix = "proxy-service:" // TODO(peering): remove
discoveryChainPayloadIDPrefix = "chain:"
)

View file

@ -37,20 +37,6 @@ func newExportedStandardServiceRequest(logger hclog.Logger, svc structs.ServiceN
}
}
// TODO(peering): remove
func newExportedConnectProxyServiceRequest(logger hclog.Logger, svc structs.ServiceName, sub Subscriber) *exportedServiceRequest {
req := structs.ServiceSpecificRequest{
ServiceName: svc.Name,
Connect: true,
EnterpriseMeta: svc.EnterpriseMeta,
}
return &exportedServiceRequest{
logger: logger,
req: req,
sub: sub,
}
}
// CacheInfo implements submatview.Request
func (e *exportedServiceRequest) CacheInfo() cache.RequestInfo {
return e.req.CacheInfo()

View file

@ -13,10 +13,12 @@ import (
envoy_upstreams_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
envoy_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/hashicorp/go-hclog"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
@ -575,23 +577,14 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
s.Logger.Trace("generating cluster for", "cluster", clusterName)
if c == nil {
c = &envoy_cluster_v3.Cluster{
Name: clusterName,
AltStatName: clusterName,
ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS},
Name: clusterName,
AltStatName: clusterName,
ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
HealthyPanicThreshold: &envoy_type_v3.Percent{
Value: 0, // disable panic threshold
},
},
EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{
EdsConfig: &envoy_core_v3.ConfigSource{
ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
Ads: &envoy_core_v3.AggregatedConfigSource{},
},
},
},
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
Thresholds: makeThresholdsIfNeeded(cfg.Limits),
},
@ -602,6 +595,35 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
return c, err
}
}
useEDS := true
if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
useEDS = false
}
// If none of the service instances are addressed by a hostname we
// provide the endpoint IP addresses via EDS
if useEDS {
c.ClusterDiscoveryType = &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS}
c.EdsClusterConfig = &envoy_cluster_v3.Cluster_EdsClusterConfig{
EdsConfig: &envoy_core_v3.ConfigSource{
ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
Ads: &envoy_core_v3.AggregatedConfigSource{},
},
},
}
} else {
configureClusterWithHostnames(
s.Logger,
c,
"", /*TODO:make configurable?*/
cfgSnap.ConnectProxy.PeerUpstreamEndpoints[uid],
true, /*isRemote*/
false, /*onlyPassing*/
)
}
}
rootPEMs := cfgSnap.RootPEMs()
@ -1054,9 +1076,31 @@ func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, op
},
},
}
return cluster
} else {
configureClusterWithHostnames(
s.Logger,
cluster,
cfg.DNSDiscoveryType,
opts.hostnameEndpoints,
opts.isRemote,
opts.onlyPassing,
)
}
return cluster
}
func configureClusterWithHostnames(
logger hclog.Logger,
cluster *envoy_cluster_v3.Cluster,
dnsDiscoveryType string,
// hostnameEndpoints is a list of endpoints with a hostname as their address
hostnameEndpoints structs.CheckServiceNodes,
// isRemote determines whether the cluster is in a remote DC or partition and we should prefer a WAN address
isRemote bool,
// onlyPassing determines whether endpoints that do not have a passing status should be considered unhealthy
onlyPassing bool,
) {
// When a service instance is addressed by a hostname we have Envoy do the DNS resolution
// by setting a DNS cluster type and passing the hostname endpoints via CDS.
rate := 10 * time.Second
@ -1064,7 +1108,7 @@ func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, op
cluster.DnsLookupFamily = envoy_cluster_v3.Cluster_V4_ONLY
discoveryType := envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS}
if cfg.DNSDiscoveryType == "strict_dns" {
if dnsDiscoveryType == "strict_dns" {
discoveryType.Type = envoy_cluster_v3.Cluster_STRICT_DNS
}
cluster.ClusterDiscoveryType = &discoveryType
@ -1077,11 +1121,11 @@ func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, op
idx int
fallback *envoy_endpoint_v3.LbEndpoint
)
for i, e := range opts.hostnameEndpoints {
_, addr, port := e.BestAddress(opts.isRemote)
for i, e := range hostnameEndpoints {
_, addr, port := e.BestAddress(isRemote)
uniqueHostnames[addr] = true
health, weight := calculateEndpointHealthAndWeight(e, opts.onlyPassing)
health, weight := calculateEndpointHealthAndWeight(e, onlyPassing)
if health == envoy_core_v3.HealthStatus_UNHEALTHY {
fallback = makeLbEndpoint(addr, port, health, weight)
continue
@ -1096,18 +1140,18 @@ func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, op
}
}
dc := opts.hostnameEndpoints[idx].Node.Datacenter
service := opts.hostnameEndpoints[idx].Service.CompoundServiceName()
dc := hostnameEndpoints[idx].Node.Datacenter
service := hostnameEndpoints[idx].Service.CompoundServiceName()
// Fall back to last unhealthy endpoint if none were healthy
if len(endpoints) == 0 {
s.Logger.Warn("upstream service does not contain any healthy instances",
logger.Warn("upstream service does not contain any healthy instances",
"dc", dc, "service", service.String())
endpoints = append(endpoints, fallback)
}
if len(uniqueHostnames) > 1 {
s.Logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q be resolved by Envoy", hostname),
logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q be resolved by Envoy", hostname),
"dc", dc, "service", service.String())
}
@ -1119,7 +1163,6 @@ func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, op
},
},
}
return cluster
}
func makeThresholdsIfNeeded(limits *structs.UpstreamLimits) []*envoy_cluster_v3.CircuitBreakers_Thresholds {
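The refactor above splits cluster generation: peered upstreams whose instances are known by IP keep the EDS-over-ADS wiring, while instances addressed by a hostname are handed to configureClusterWithHostnames so Envoy performs DNS resolution itself through a LOGICAL_DNS (or STRICT_DNS) cluster delivered entirely via CDS. Below is a minimal standalone sketch of that CDS shape, assuming current go-control-plane import paths; the cluster name, hostname, and port are illustrative values copied from the golden test file further down, not real infrastructure.

```go
package main

import (
	"fmt"
	"time"

	envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Illustrative values lifted from the golden file in this change.
	const name = "payments.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"

	// When the endpoint address is a hostname, Envoy resolves it itself:
	// the cluster type is LOGICAL_DNS and the hostname rides along in CDS
	// via the load assignment instead of being served through EDS.
	c := &envoy_cluster_v3.Cluster{
		Name:                 name,
		AltStatName:          name,
		ConnectTimeout:       durationpb.New(5 * time.Second),
		ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS},
		DnsRefreshRate:       durationpb.New(10 * time.Second),
		DnsLookupFamily:      envoy_cluster_v3.Cluster_V4_ONLY,
		LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{
			ClusterName: name,
			Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{{
				LbEndpoints: []*envoy_endpoint_v3.LbEndpoint{{
					HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
						Endpoint: &envoy_endpoint_v3.Endpoint{
							Address: &envoy_core_v3.Address{
								Address: &envoy_core_v3.Address_SocketAddress{
									SocketAddress: &envoy_core_v3.SocketAddress{
										Address:       "123.us-east-1.elb.notaws.com",
										PortSpecifier: &envoy_core_v3.SocketAddress_PortValue{PortValue: 8443},
									},
								},
							},
						},
					},
					HealthStatus:        envoy_core_v3.HealthStatus_HEALTHY,
					LoadBalancingWeight: wrapperspb.UInt32(1),
				}},
			}},
		},
	}
	fmt.Println(c.Name, c.GetType()) // prints the cluster name and LOGICAL_DNS
}
```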

View file

@ -96,6 +96,12 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
clusterName = uid.EnvoyID()
}
// Also skip peer instances with a hostname as their address. EDS
// cannot resolve hostnames, so we provide them through CDS instead.
if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
continue
}
endpoints, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints[uid]
if ok {
la := makeLoadAssignment(
@ -103,7 +109,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
cfgSnap.Locality,
proxycfg.GatewayKey{ /*empty so it never matches*/ },
)
resources = append(resources, la)
}

View file

@ -134,10 +134,10 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) {
}
*/
for _, v := range []string{
"1.19.0", "1.19.1", "1.19.2", "1.19.3",
"1.20.0", "1.20.1", "1.20.2",
"1.21.1",
"1.22.0",
"1.19.0", "1.19.1", "1.19.2", "1.19.3", "1.19.4", "1.19.5",
"1.20.0", "1.20.1", "1.20.2", "1.20.3", "1.20.4",
"1.21.0", "1.21.1", "1.21.2", "1.21.3",
"1.22.0", "1.22.1", "1.22.2",
} {
cases[v] = testcase{expect: supportedProxyFeatures{}}
}

View file

@ -695,6 +695,7 @@ func (s *ResourceGenerator) injectConnectFilters(cfgSnap *proxycfg.ConfigSnapsho
authzFilter, err := makeRBACNetworkFilter(
cfgSnap.ConnectProxy.Intentions,
cfgSnap.IntentionDefaultAllow,
cfgSnap.ConnectProxy.PeerTrustBundles,
)
if err != nil {
return err
@ -963,6 +964,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
httpAuthzFilter, err := makeRBACHTTPFilter(
cfgSnap.ConnectProxy.Intentions,
cfgSnap.IntentionDefaultAllow,
cfgSnap.ConnectProxy.PeerTrustBundles,
)
if err != nil {
return nil, err
@ -1019,6 +1021,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter(
cfgSnap.ConnectProxy.Intentions,
cfgSnap.IntentionDefaultAllow,
cfgSnap.ConnectProxy.PeerTrustBundles,
)
if err != nil {
return nil, err
@ -1295,6 +1298,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(
authFilter, err := makeRBACNetworkFilter(
intentions,
cfgSnap.IntentionDefaultAllow,
nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway
)
if err != nil {
return nil, err
@ -1319,6 +1323,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(
opts.httpAuthzFilter, err = makeRBACHTTPFilter(
intentions,
cfgSnap.IntentionDefaultAllow,
nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway
)
if err != nil {
return nil, err

View file

@ -7,8 +7,8 @@ package proxysupport
//
// see: https://www.consul.io/docs/connect/proxies/envoy#supported-versions
var EnvoyVersions = []string{
"1.22.0",
"1.21.1",
"1.20.2",
"1.19.3",
"1.22.2",
"1.21.3",
"1.20.4",
"1.19.5",
}

View file

@ -15,10 +15,15 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
func makeRBACNetworkFilter(intentions structs.Intentions, intentionDefaultAllow bool) (*envoy_listener_v3.Filter, error) {
rules, err := makeRBACRules(intentions, intentionDefaultAllow, false)
func makeRBACNetworkFilter(
intentions structs.Intentions,
intentionDefaultAllow bool,
peerTrustBundles map[string]*pbpeering.PeeringTrustBundle,
) (*envoy_listener_v3.Filter, error) {
rules, err := makeRBACRules(intentions, intentionDefaultAllow, false, peerTrustBundles)
if err != nil {
return nil, err
}
@ -30,8 +35,12 @@ func makeRBACNetworkFilter(intentions structs.Intentions, intentionDefaultAllow
return makeFilter("envoy.filters.network.rbac", cfg)
}
func makeRBACHTTPFilter(intentions structs.Intentions, intentionDefaultAllow bool) (*envoy_http_v3.HttpFilter, error) {
rules, err := makeRBACRules(intentions, intentionDefaultAllow, true)
func makeRBACHTTPFilter(
intentions structs.Intentions,
intentionDefaultAllow bool,
peerTrustBundles map[string]*pbpeering.PeeringTrustBundle,
) (*envoy_http_v3.HttpFilter, error) {
rules, err := makeRBACRules(intentions, intentionDefaultAllow, true, peerTrustBundles)
if err != nil {
return nil, err
}
@ -42,7 +51,11 @@ func makeRBACHTTPFilter(intentions structs.Intentions, intentionDefaultAllow boo
return makeEnvoyHTTPFilter("envoy.filters.http.rbac", cfg)
}
func intentionListToIntermediateRBACForm(intentions structs.Intentions, isHTTP bool) []*rbacIntention {
func intentionListToIntermediateRBACForm(
intentions structs.Intentions,
isHTTP bool,
trustBundlesByPeer map[string]*pbpeering.PeeringTrustBundle,
) []*rbacIntention {
sort.Sort(structs.IntentionPrecedenceSorter(intentions))
// Omit any lower-precedence intentions that share the same source.
@ -50,7 +63,16 @@ func intentionListToIntermediateRBACForm(intentions structs.Intentions, isHTTP b
rbacIxns := make([]*rbacIntention, 0, len(intentions))
for _, ixn := range intentions {
rixn := intentionToIntermediateRBACForm(ixn, isHTTP)
// trustBundle is only applicable to imported services
trustBundle, ok := trustBundlesByPeer[ixn.SourcePeer]
if ixn.SourcePeer != "" && !ok {
// If the intention defines a source peer, we expect to
// see a trust bundle. Otherwise the config snapshot may
// not have yet received the bundles and we fail silently
continue
}
rixn := intentionToIntermediateRBACForm(ixn, isHTTP, trustBundle)
rbacIxns = append(rbacIxns, rixn)
}
return rbacIxns
@ -188,11 +210,21 @@ func removePermissionPrecedence(perms []*rbacPermission, intentionDefaultAction
return out
}
func intentionToIntermediateRBACForm(ixn *structs.Intention, isHTTP bool) *rbacIntention {
func intentionToIntermediateRBACForm(ixn *structs.Intention, isHTTP bool, bundle *pbpeering.PeeringTrustBundle) *rbacIntention {
rixn := &rbacIntention{
Source: ixn.SourceServiceName(),
Source: rbacService{
ServiceName: ixn.SourceServiceName(),
Peer: ixn.SourcePeer,
},
Precedence: ixn.Precedence,
}
// imported services will have additional metadata used to override SpiffeID creation
if bundle != nil {
rixn.Source.ExportedPartition = bundle.ExportedPartition
rixn.Source.TrustDomain = bundle.TrustDomain
}
if len(ixn.Permissions) > 0 {
if isHTTP {
rixn.Action = intentionActionLayer7
@ -237,9 +269,20 @@ func intentionActionFromString(s structs.IntentionAction) intentionAction {
return intentionActionDeny
}
type rbacService struct {
structs.ServiceName
// Peer, ExportedPartition, and TrustDomain are
// only applicable to imported services and are
// used to override SPIFFEID fields.
Peer string
ExportedPartition string
TrustDomain string
}
type rbacIntention struct {
Source structs.ServiceName
NotSources []structs.ServiceName
Source rbacService
NotSources []rbacService
Action intentionAction
Permissions []*rbacPermission
Precedence int
@ -300,7 +343,7 @@ func (p *rbacPermission) Flatten() *envoy_rbac_v3.Permission {
// simplifyNotSourceSlice will collapse NotSources elements together if any element is
// a subset of another.
// For example "default/web" is a subset of "default/*" because it is covered by the wildcard.
func simplifyNotSourceSlice(notSources []structs.ServiceName) []structs.ServiceName {
func simplifyNotSourceSlice(notSources []rbacService) []rbacService {
if len(notSources) <= 1 {
return notSources
}
@ -311,7 +354,7 @@ func simplifyNotSourceSlice(notSources []structs.ServiceName) []structs.ServiceN
return countWild(notSources[i]) < countWild(notSources[j])
})
keep := make([]structs.ServiceName, 0, len(notSources))
keep := make([]rbacService, 0, len(notSources))
for i := 0; i < len(notSources); i++ {
si := notSources[i]
remove := false
@ -380,7 +423,12 @@ func simplifyNotSourceSlice(notSources []structs.ServiceName) []structs.ServiceN
// <default> : DENY
//
// Which really is just an allow-list of [A, C AND NOT(B)]
func makeRBACRules(intentions structs.Intentions, intentionDefaultAllow bool, isHTTP bool) (*envoy_rbac_v3.RBAC, error) {
func makeRBACRules(
intentions structs.Intentions,
intentionDefaultAllow bool,
isHTTP bool,
peerTrustBundles map[string]*pbpeering.PeeringTrustBundle,
) (*envoy_rbac_v3.RBAC, error) {
// Note that we DON'T explicitly validate the trust-domain matches ours.
//
// For now we don't validate the trust domain of the _destination_ at all.
@ -396,7 +444,7 @@ func makeRBACRules(intentions structs.Intentions, intentionDefaultAllow bool, is
// TODO(banks,rb): Implement revocation list checking?
// First build up just the basic principal matches.
rbacIxns := intentionListToIntermediateRBACForm(intentions, isHTTP)
rbacIxns := intentionListToIntermediateRBACForm(intentions, isHTTP, peerTrustBundles)
// Normalize: if we are in default-deny then all intentions must be allows and vice versa
intentionDefaultAction := intentionActionFromBool(intentionDefaultAllow)
@ -477,17 +525,20 @@ func removeSameSourceIntentions(intentions structs.Intentions) structs.Intention
var (
out = make(structs.Intentions, 0, len(intentions))
changed = false
seenSource = make(map[structs.ServiceName]struct{})
seenSource = make(map[structs.PeeredServiceName]struct{})
)
for _, ixn := range intentions {
sn := ixn.SourceServiceName()
if _, ok := seenSource[sn]; ok {
psn := structs.PeeredServiceName{
ServiceName: ixn.SourceServiceName(),
Peer: ixn.SourcePeer,
}
if _, ok := seenSource[psn]; ok {
// A higher precedence intention already used this exact source
// definition with a different destination.
changed = true
continue
}
seenSource[sn] = struct{}{}
seenSource[psn] = struct{}{}
out = append(out, ixn)
}
@ -497,7 +548,7 @@ func removeSameSourceIntentions(intentions structs.Intentions) structs.Intention
return out
}
// ixnSourceMatches deterines if the 'tester' service name is matched by the
// ixnSourceMatches determines if the 'tester' service name is matched by the
// 'against' service name via wildcard rules.
//
// For instance:
@ -506,7 +557,9 @@ func removeSameSourceIntentions(intentions structs.Intentions) structs.Intention
// - (default/web, default/*) => true, because "all services in the default NS" includes "default/web"
// - (default/*, */*) => true, "any service in any NS" includes "all services in the default NS"
// - (default/default/*, other/*/*) => false, "any service in "other" partition" does NOT include services in the default partition"
func ixnSourceMatches(tester, against structs.ServiceName) bool {
//
// Peer and partition must be exact names and cannot be compared with wildcards.
func ixnSourceMatches(tester, against rbacService) bool {
// We assume that we can't have the same intention twice before arriving
// here.
numWildTester := countWild(tester)
@ -518,18 +571,22 @@ func ixnSourceMatches(tester, against structs.ServiceName) bool {
return false
}
matchesAP := tester.PartitionOrDefault() == against.PartitionOrDefault() || against.PartitionOrDefault() == structs.WildcardSpecifier
matchesAP := tester.PartitionOrDefault() == against.PartitionOrDefault()
matchesPeer := tester.Peer == against.Peer
matchesNS := tester.NamespaceOrDefault() == against.NamespaceOrDefault() || against.NamespaceOrDefault() == structs.WildcardSpecifier
matchesName := tester.Name == against.Name || against.Name == structs.WildcardSpecifier
return matchesAP && matchesNS && matchesName
return matchesAP && matchesPeer && matchesNS && matchesName
}
// countWild counts the number of wildcard values in the given namespace and name.
func countWild(src structs.ServiceName) int {
func countWild(src rbacService) int {
// If Partition is wildcard, panic because it's not supported
if src.PartitionOrDefault() == structs.WildcardSpecifier {
panic("invalid state: intention references wildcard partition")
}
if src.Peer == structs.WildcardSpecifier {
panic("invalid state: intention references wildcard peer")
}
// If NS is wildcard, it must be 2 since wildcards only follow exact
if src.NamespaceOrDefault() == structs.WildcardSpecifier {
@ -564,8 +621,8 @@ func notPrincipal(id *envoy_rbac_v3.Principal) *envoy_rbac_v3.Principal {
}
}
func idPrincipal(src structs.ServiceName) *envoy_rbac_v3.Principal {
pattern := makeSpiffePattern(src.PartitionOrDefault(), src.NamespaceOrDefault(), src.Name)
func idPrincipal(src rbacService) *envoy_rbac_v3.Principal {
pattern := makeSpiffePattern(src)
return &envoy_rbac_v3.Principal{
Identifier: &envoy_rbac_v3.Principal_Authenticated_{
@ -580,37 +637,52 @@ func idPrincipal(src structs.ServiceName) *envoy_rbac_v3.Principal {
}
}
func makeSpiffePattern(sourceAP, sourceNS, sourceName string) string {
if sourceNS == structs.WildcardSpecifier && sourceName != structs.WildcardSpecifier {
panic(fmt.Sprintf("not possible to have a wildcarded namespace %q but an exact service %q", sourceNS, sourceName))
const anyPath = `[^/]+`
func makeSpiffePattern(src rbacService) string {
var (
host = anyPath // TODO(peering): We match trust domain on any value but should be defaulting to the local trust domain
ap = src.PartitionOrDefault()
ns = src.NamespaceOrDefault()
svc = src.Name
)
// Validate proper wildcarding
if ns == structs.WildcardSpecifier && svc != structs.WildcardSpecifier {
panic(fmt.Sprintf("not possible to have a wildcarded namespace %q but an exact service %q", ns, svc))
}
if sourceAP == structs.WildcardSpecifier {
if ap == structs.WildcardSpecifier {
panic("not possible to have a wildcarded source partition")
}
const anyPath = `[^/]+`
// Match on any namespace or service if it is a wildcard, or on a specific value otherwise.
ns := sourceNS
if sourceNS == structs.WildcardSpecifier {
ns = anyPath
if src.Peer == structs.WildcardSpecifier {
panic("not possible to have a wildcarded source peer")
}
svc := sourceName
if sourceName == structs.WildcardSpecifier {
// Match on any namespace or service if it is a wildcard, or on a specific value otherwise.
if ns == structs.WildcardSpecifier {
ns = anyPath
}
if svc == structs.WildcardSpecifier {
svc = anyPath
}
// If service is imported from a peer, the SpiffeID must
// refer to its remote partition and trust domain.
if src.Peer != "" {
ap = src.ExportedPartition
host = src.TrustDomain
}
id := connect.SpiffeIDService{
Namespace: ns,
Service: svc,
Host: host,
// Trust domain and datacenter are not verified by RBAC, so we match on any value.
Host: anyPath,
// Datacenter is not verified by RBAC, so we match on any value.
Datacenter: anyPath,
// Partition can only ever be an exact value.
Partition: sourceAP,
Partition: ap,
}
return fmt.Sprintf(`^%s://%s%s$`, id.URI().Scheme, id.Host, id.URI().Path)
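
To make the peered-source handling concrete, a small sketch of the principal patterns makeSpiffePattern is expected to emit follows: a local source matches any trust domain, while a source imported from a peer pins the peer's trust domain and exported partition. The two expected strings are copied from the golden RBAC filters added later in this change; the regexp calls are only illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern for a local source service "web" in the default namespace:
	// any trust domain and datacenter, exact namespace and service.
	local := `^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$`

	// Pattern for the same source imported from "peer1": the trust domain
	// and partition come from the peer's trust bundle (ExportedPartition).
	peered := `^spiffe://peer1.domain/ap/part1/ns/default/dc/[^/]+/svc/web$`

	fmt.Println(regexp.MustCompile(local).MatchString(
		"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/web")) // true
	fmt.Println(regexp.MustCompile(peered).MatchString(
		"spiffe://peer1.domain/ap/part1/ns/default/dc/dc1/svc/web")) // true
	fmt.Println(regexp.MustCompile(peered).MatchString(
		"spiffe://other.domain/ap/part1/ns/default/dc/dc1/svc/web")) // false
}
```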

View file

@ -13,24 +13,35 @@ import (
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
func TestRemoveIntentionPrecedence(t *testing.T) {
testIntention := func(t *testing.T, src, dst string, action structs.IntentionAction) *structs.Intention {
type ixnOpts struct {
src string
peer string
action structs.IntentionAction
}
testIntention := func(t *testing.T, opts ixnOpts) *structs.Intention {
t.Helper()
ixn := structs.TestIntention(t)
ixn.SourceName = src
ixn.DestinationName = dst
ixn.Action = action
ixn.SourceName = opts.src
ixn.SourcePeer = opts.peer
ixn.Action = opts.action
// Destination is hardcoded, since RBAC rules are generated for a single destination
ixn.DestinationName = "api"
//nolint:staticcheck
ixn.UpdatePrecedence()
return ixn
}
testSourceIntention := func(src string, action structs.IntentionAction) *structs.Intention {
return testIntention(t, src, "api", action)
testSourceIntention := func(opts ixnOpts) *structs.Intention {
return testIntention(t, opts)
}
testSourcePermIntention := func(src string, perms ...*structs.IntentionPermission) *structs.Intention {
ixn := testIntention(t, src, "api", "")
opts := ixnOpts{src: src}
ixn := testIntention(t, opts)
ixn.Permissions = perms
return ixn
}
@ -40,10 +51,21 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
})
return structs.Intentions(ixns)
}
testPeerTrustBundle := map[string]*pbpeering.PeeringTrustBundle{
"peer1": {
PeerName: "peer1",
TrustDomain: "peer1.domain",
ExportedPartition: "part1",
},
}
var (
nameWild = structs.NewServiceName("*", nil)
nameWeb = structs.NewServiceName("web", nil)
nameWild = rbacService{ServiceName: structs.NewServiceName("*", nil)}
nameWeb = rbacService{ServiceName: structs.NewServiceName("web", nil)}
nameWildPeered = rbacService{ServiceName: structs.NewServiceName("*", nil),
Peer: "peer1", TrustDomain: "peer1.domain", ExportedPartition: "part1"}
nameWebPeered = rbacService{ServiceName: structs.NewServiceName("web", nil),
Peer: "peer1", TrustDomain: "peer1.domain", ExportedPartition: "part1"}
permSlashPrefix = &structs.IntentionPermission{
Action: structs.IntentionActionAllow,
HTTP: &structs.IntentionHTTPPermission{
@ -154,12 +176,12 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permSlashPrefix),
testSourceIntention("*", structs.IntentionActionDeny),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionDeny}),
),
expect: []*rbacIntention{
{
Source: nameWild,
NotSources: []structs.ServiceName{
NotSources: []rbacService{
nameWeb,
},
Action: intentionActionDeny,
@ -182,7 +204,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permSlashPrefix),
testSourceIntention("*", structs.IntentionActionDeny),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionDeny}),
),
expect: []*rbacIntention{
{
@ -209,7 +231,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permDenySlashPrefix),
testSourceIntention("*", structs.IntentionActionDeny),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionDeny}),
),
expect: []*rbacIntention{
{
@ -231,7 +253,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
},
{
Source: nameWild,
NotSources: []structs.ServiceName{
NotSources: []rbacService{
nameWeb,
},
Action: intentionActionDeny,
@ -254,7 +276,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permDenySlashPrefix),
testSourceIntention("*", structs.IntentionActionDeny),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionDeny}),
),
expect: []*rbacIntention{},
},
@ -264,7 +286,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permSlashPrefix),
testSourceIntention("*", structs.IntentionActionAllow),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionAllow}),
),
expect: []*rbacIntention{},
},
@ -273,7 +295,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permSlashPrefix),
testSourceIntention("*", structs.IntentionActionAllow),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionAllow}),
),
expect: []*rbacIntention{
{
@ -295,7 +317,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
},
{
Source: nameWild,
NotSources: []structs.ServiceName{
NotSources: []rbacService{
nameWeb,
},
Action: intentionActionAllow,
@ -318,7 +340,7 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permDenySlashPrefix),
testSourceIntention("*", structs.IntentionActionAllow),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionAllow}),
),
expect: []*rbacIntention{
{
@ -345,12 +367,12 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
http: true,
intentions: sorted(
testSourcePermIntention("web", permDenySlashPrefix),
testSourceIntention("*", structs.IntentionActionAllow),
testSourceIntention(ixnOpts{src: "*", action: structs.IntentionActionAllow}),
),
expect: []*rbacIntention{
{
Source: nameWild,
NotSources: []structs.ServiceName{
NotSources: []rbacService{
nameWeb,
},
Action: intentionActionAllow,
@ -368,11 +390,56 @@ func TestRemoveIntentionPrecedence(t *testing.T) {
},
},
},
// ========= Sanity check that peers get passed through
"default-deny-peered": {
intentionDefaultAllow: false,
http: true,
intentions: sorted(
testSourceIntention(ixnOpts{
src: "*",
action: structs.IntentionActionAllow,
peer: "peer1",
}),
testSourceIntention(ixnOpts{
src: "web",
action: structs.IntentionActionAllow,
peer: "peer1",
}),
),
expect: []*rbacIntention{
{
Source: nameWebPeered,
Action: intentionActionAllow,
Permissions: nil,
Precedence: 9,
Skip: false,
ComputedPrincipal: idPrincipal(nameWebPeered),
},
{
Source: nameWildPeered,
Action: intentionActionAllow,
NotSources: []rbacService{
nameWebPeered,
},
Permissions: nil,
Precedence: 8,
Skip: false,
ComputedPrincipal: andPrincipals(
[]*envoy_rbac_v3.Principal{
idPrincipal(nameWildPeered),
notPrincipal(
idPrincipal(nameWebPeered),
),
},
),
},
},
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
rbacIxns := intentionListToIntermediateRBACForm(tt.intentions, tt.http)
rbacIxns := intentionListToIntermediateRBACForm(tt.intentions, tt.http, testPeerTrustBundle)
intentionDefaultAction := intentionActionFromBool(tt.intentionDefaultAllow)
rbacIxns = removeIntentionPrecedence(rbacIxns, intentionDefaultAction)
@ -395,11 +462,23 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
testSourceIntention := func(src string, action structs.IntentionAction) *structs.Intention {
return testIntention(t, src, "api", action)
}
testIntentionPeered := func(src string, peer string, action structs.IntentionAction) *structs.Intention {
ixn := testIntention(t, src, "api", action)
ixn.SourcePeer = peer
return ixn
}
testSourcePermIntention := func(src string, perms ...*structs.IntentionPermission) *structs.Intention {
ixn := testIntention(t, src, "api", "")
ixn.Permissions = perms
return ixn
}
testPeerTrustBundle := map[string]*pbpeering.PeeringTrustBundle{
"peer1": {
PeerName: "peer1",
TrustDomain: "peer1.domain",
ExportedPartition: "part1",
},
}
sorted := func(ixns ...*structs.Intention) structs.Intentions {
sort.SliceStable(ixns, func(i, j int) bool {
return ixns[j].Precedence < ixns[i].Precedence
@ -485,6 +564,14 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
testSourceIntention("*", structs.IntentionActionDeny),
),
},
"default-deny-peered-kitchen-sink": {
intentionDefaultAllow: false,
intentions: sorted(
testSourceIntention("web", structs.IntentionActionAllow),
testIntentionPeered("*", "peer1", structs.IntentionActionAllow),
testIntentionPeered("web", "peer1", structs.IntentionActionDeny),
),
},
// ========================
"default-allow-path-allow": {
intentionDefaultAllow: true,
@ -710,7 +797,7 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
tt := tt
t.Run(name, func(t *testing.T) {
t.Run("network filter", func(t *testing.T) {
filter, err := makeRBACNetworkFilter(tt.intentions, tt.intentionDefaultAllow)
filter, err := makeRBACNetworkFilter(tt.intentions, tt.intentionDefaultAllow, testPeerTrustBundle)
require.NoError(t, err)
t.Run("current", func(t *testing.T) {
@ -720,7 +807,7 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) {
})
})
t.Run("http filter", func(t *testing.T) {
filter, err := makeRBACHTTPFilter(tt.intentions, tt.intentionDefaultAllow)
filter, err := makeRBACHTTPFilter(tt.intentions, tt.intentionDefaultAllow, testPeerTrustBundle)
require.NoError(t, err)
t.Run("current", func(t *testing.T) {
@ -743,6 +830,16 @@ func TestRemoveSameSourceIntentions(t *testing.T) {
ixn.UpdatePrecedence()
return ixn
}
testIntentionPeered := func(t *testing.T, src, dst, peer string) *structs.Intention {
t.Helper()
ixn := structs.TestIntention(t)
ixn.SourceName = src
ixn.SourcePeer = peer
ixn.DestinationName = dst
//nolint:staticcheck
ixn.UpdatePrecedence()
return ixn
}
sorted := func(ixns ...*structs.Intention) structs.Intentions {
sort.SliceStable(ixns, func(i, j int) bool {
return ixns[j].Precedence < ixns[i].Precedence
@ -790,6 +887,20 @@ func TestRemoveSameSourceIntentions(t *testing.T) {
testIntention(t, "*", "foo"),
),
},
"kitchen sink with peers": {
in: sorted(
testIntention(t, "bar", "foo"),
testIntentionPeered(t, "bar", "foo", "peer1"),
testIntentionPeered(t, "bar", "*", "peer1"),
testIntentionPeered(t, "*", "foo", "peer1"),
testIntentionPeered(t, "*", "*", "peer1"),
),
expect: sorted(
testIntention(t, "bar", "foo"),
testIntentionPeered(t, "bar", "foo", "peer1"),
testIntentionPeered(t, "*", "foo", "peer1"),
),
},
}
for name, tc := range tests {
@ -836,36 +947,48 @@ func TestSimplifyNotSourceSlice(t *testing.T) {
func TestIxnSourceMatches(t *testing.T) {
tests := []struct {
tester, against string
matches bool
tester string
testerPeer string
against string
againstPeer string
matches bool
}{
// identical precedence
{"web", "api", false},
{"*", "*", false},
{"web", "", "api", "", false},
{"*", "", "*", "", false},
// backwards precedence
{"*", "web", false},
{"*", "", "web", "", false},
// name wildcards
{"web", "*", true},
{"web", "", "*", "", true},
// peered cmp peered
{"web", "peer1", "api", "peer1", false},
{"*", "peer1", "*", "peer1", false},
// no match if peer is different
{"web", "peer1", "web", "", false},
{"*", "peer1", "*", "peer2", false},
// name wildcards with peer
{"web", "peer1", "*", "peer1", true},
}
for _, tc := range tests {
t.Run(fmt.Sprintf("%s cmp %s", tc.tester, tc.against), func(t *testing.T) {
t.Run(fmt.Sprintf("%s%s cmp %s%s", tc.testerPeer, tc.tester, tc.againstPeer, tc.against), func(t *testing.T) {
matches := ixnSourceMatches(
structs.ServiceNameFromString(tc.tester),
structs.ServiceNameFromString(tc.against),
rbacService{ServiceName: structs.ServiceNameFromString(tc.tester), Peer: tc.testerPeer},
rbacService{ServiceName: structs.ServiceNameFromString(tc.against), Peer: tc.againstPeer},
)
assert.Equal(t, tc.matches, matches)
})
}
}
func makeServiceNameSlice(slice []string) []structs.ServiceName {
func makeServiceNameSlice(slice []string) []rbacService {
if len(slice) == 0 {
return nil
}
var out []structs.ServiceName
var out []rbacService
for _, src := range slice {
out = append(out, structs.ServiceNameFromString(src))
out = append(out, rbacService{ServiceName: structs.ServiceNameFromString(src)})
}
return out
}

View file

@ -30,19 +30,34 @@
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "payments.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul",
"altStatName": "payments.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"type": "LOGICAL_DNS",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "payments.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "123.us-east-1.elb.notaws.com",
"portValue": 8443
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
"circuitBreakers": {
},
"dnsRefreshRate": "10s",
"dnsLookupFamily": "V4_ONLY",
"outlierDetection": {
},

View file

@ -0,0 +1,63 @@
{
"name": "envoy.filters.http.rbac",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC",
"rules": {
"policies": {
"consul-intentions-layer4": {
"permissions": [
{
"any": true
}
],
"principals": [
{
"authenticated": {
"principalName": {
"safeRegex": {
"googleRe2": {
},
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$"
}
}
}
},
{
"andIds": {
"ids": [
{
"authenticated": {
"principalName": {
"safeRegex": {
"googleRe2": {
},
"regex": "^spiffe://peer1.domain/ap/part1/ns/default/dc/[^/]+/svc/[^/]+$"
}
}
}
},
{
"notId": {
"authenticated": {
"principalName": {
"safeRegex": {
"googleRe2": {
},
"regex": "^spiffe://peer1.domain/ap/part1/ns/default/dc/[^/]+/svc/web$"
}
}
}
}
}
]
}
}
]
}
}
}
}
}

View file

@ -0,0 +1,64 @@
{
"name": "envoy.filters.network.rbac",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC",
"rules": {
"policies": {
"consul-intentions-layer4": {
"permissions": [
{
"any": true
}
],
"principals": [
{
"authenticated": {
"principalName": {
"safeRegex": {
"googleRe2": {
},
"regex": "^spiffe://[^/]+/ns/default/dc/[^/]+/svc/web$"
}
}
}
},
{
"andIds": {
"ids": [
{
"authenticated": {
"principalName": {
"safeRegex": {
"googleRe2": {
},
"regex": "^spiffe://peer1.domain/ap/part1/ns/default/dc/[^/]+/svc/[^/]+$"
}
}
}
},
{
"notId": {
"authenticated": {
"principalName": {
"safeRegex": {
"googleRe2": {
},
"regex": "^spiffe://peer1.domain/ap/part1/ns/default/dc/[^/]+/svc/web$"
}
}
}
}
}
]
}
}
]
}
}
},
"statPrefix": "connect_authz"
}
}

View file

@ -3,6 +3,7 @@ package api
import (
"context"
"fmt"
"time"
)
// PeeringState enumerates all the states a peering can be in
@ -36,6 +37,8 @@ type Peering struct {
Name string
// Partition is the local partition connecting to the peer.
Partition string `json:",omitempty"`
// DeletedAt is the time when the Peering was marked for deletion
DeletedAt *time.Time `json:",omitempty" alias:"deleted_at"`
// Meta is a mapping of some string value to any other string value
Meta map[string]string `json:",omitempty"`
// State is one of the valid PeeringState values to represent the status of
@ -77,7 +80,7 @@ type PeeringGenerateTokenResponse struct {
PeeringToken string
}
type PeeringInitiateRequest struct {
type PeeringEstablishRequest struct {
// Name of the remote peer.
PeerName string
// The peering token returned from the peer's GenerateToken endpoint.
@ -88,7 +91,7 @@ type PeeringInitiateRequest struct {
Meta map[string]string `json:",omitempty"`
}
type PeeringInitiateResponse struct {
type PeeringEstablishResponse struct {
}
type PeeringListRequest struct {
@ -192,8 +195,8 @@ func (p *Peerings) GenerateToken(ctx context.Context, g PeeringGenerateTokenRequ
}
// TODO(peering): verify this is the ultimate signature we want
func (p *Peerings) Initiate(ctx context.Context, i PeeringInitiateRequest, wq *WriteOptions) (*PeeringInitiateResponse, *WriteMeta, error) {
req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/initiate"))
func (p *Peerings) Establish(ctx context.Context, i PeeringEstablishRequest, wq *WriteOptions) (*PeeringEstablishResponse, *WriteMeta, error) {
req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/establish"))
req.setWriteOptions(wq)
req.ctx = ctx
req.obj = i
@ -209,7 +212,7 @@ func (p *Peerings) Initiate(ctx context.Context, i PeeringInitiateRequest, wq *W
wm := &WriteMeta{RequestTime: rtt}
var out PeeringInitiateResponse
var out PeeringEstablishResponse
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}

View file

@ -134,10 +134,10 @@ func TestAPI_Peering_GenerateToken(t *testing.T) {
// TODO(peering): cover the following test cases: bad/ malformed input, peering with wrong token,
// peering with the wrong PeerName
// TestAPI_Peering_GenerateToken_Read_Initiate_Delete tests the following use case:
// a server creates a peering token, reads the token, then another server calls initiate peering
// TestAPI_Peering_GenerateToken_Read_Establish_Delete tests the following use case:
// a server creates a peering token, reads the token, then another server calls establish peering
// finally, we delete the token on the first server
func TestAPI_Peering_GenerateToken_Read_Initiate_Delete(t *testing.T) {
func TestAPI_Peering_GenerateToken_Read_Establish_Delete(t *testing.T) {
t.Parallel()
c, s := makeClientWithCA(t)
@ -181,15 +181,15 @@ func TestAPI_Peering_GenerateToken_Read_Initiate_Delete(t *testing.T) {
})
defer s2.Stop()
testutil.RunStep(t, "initiate peering", func(t *testing.T) {
i := PeeringInitiateRequest{
testutil.RunStep(t, "establish peering", func(t *testing.T) {
i := PeeringEstablishRequest{
Datacenter: c2.config.Datacenter,
PeerName: "peer1",
PeeringToken: token1,
Meta: map[string]string{"foo": "bar"},
}
_, wm, err := c2.Peerings().Initiate(ctx, i, nil)
_, wm, err := c2.Peerings().Establish(ctx, i, nil)
require.NoError(t, err)
require.NotNil(t, wm)
@ -212,10 +212,12 @@ func TestAPI_Peering_GenerateToken_Read_Initiate_Delete(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, wm)
// Read to see if the token is "gone"
resp, qm, err := c.Peerings().Read(ctx, "peer1", nil)
require.NoError(t, err)
require.NotNil(t, qm)
require.Nil(t, resp)
// Read to see if the token is gone
retry.Run(t, func(r *retry.R) {
resp, qm, err := c.Peerings().Read(ctx, "peer1", nil)
require.NoError(r, err)
require.NotNil(r, qm)
require.Nil(r, resp)
})
})
}
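
Beyond the test rename, the new Establish surface is what downstream callers would use. A minimal sketch of the intended flow with the api package follows, assuming two locally reachable agents; the addresses and peer name are illustrative, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	ctx := context.Background()

	// Client for the accepting cluster (illustrative address).
	acceptor, err := api.NewClient(&api.Config{Address: "127.0.0.1:8500"})
	if err != nil {
		log.Fatal(err)
	}

	// Generate a peering token on the acceptor.
	genResp, _, err := acceptor.Peerings().GenerateToken(ctx,
		api.PeeringGenerateTokenRequest{PeerName: "peer1"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Client for the dialing cluster (illustrative address).
	dialer, err := api.NewClient(&api.Config{Address: "127.0.0.1:9500"})
	if err != nil {
		log.Fatal(err)
	}

	// Establish the peering on the dialer using the token.
	_, _, err = dialer.Peerings().Establish(ctx, api.PeeringEstablishRequest{
		PeerName:     "peer1",
		PeeringToken: genResp.PeeringToken,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("peering established")
}
```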

View file

@ -274,6 +274,42 @@ function git_branch {
return ${ret}
}
function git_date {
# Arguments:
# $1 - Path to the git repo (optional - assumes pwd is git repo otherwise)
#
# Returns:
# 0 - success
# * - failure
#
# Notes:
# Echoes the date of the last git commit in the repo
local gdir="$(pwd)"
if test -d "$1"
then
gdir="$1"
fi
pushd "${gdir}" > /dev/null
local ret=0
# it's tricky to do an RFC3339 format in a cross platform way, so we hardcode UTC
local date_format="%Y-%m-%dT%H:%M:%SZ"
# we're using this for build date because it's stable across platform builds
local date="$(TZ=UTC0 git show -s --format=%cd --date=format-local:"$date_format" HEAD)" || ret=1
##local head="$(git status -b --porcelain=v2 | awk '{if ($1 == "#" && $2 =="branch.head") { print $3 }}')" || ret=1
popd > /dev/null
test ${ret} -eq 0 && echo "$date"
return ${ret}
}
function is_git_clean {
# Arguments:
# $1 - Path to git repo
@ -325,7 +361,8 @@ function update_git_env {
export GIT_COMMIT=$(git rev-parse --short HEAD)
export GIT_DIRTY=$(test -n "$(git status --porcelain)" && echo "+CHANGES")
export GIT_IMPORT=github.com/hashicorp/consul/version
export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY}"
export GIT_DATE=$(git_date "$1")
export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.BuildDate=${GIT_DATE}"
return 0
}
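
For context on where GIT_DATE lands: the -X flag overrides a string in the consul version package, and both `consul version` and agent startup (shown later in this diff) parse it as RFC3339 and fail early if it is malformed. A minimal standalone sketch of that pattern follows; the variable default and the example timestamp (borrowed from the formatter golden file) are placeholders, not Consul's real values.

```go
package main

import (
	"fmt"
	"time"
)

// BuildDate stands in for the consul version package variable that the
// linker overrides via -X; the default here is only a placeholder.
var BuildDate = "1970-01-01T00:00:01Z"

func main() {
	// Both the agent and `consul version` parse the injected value and
	// refuse to run if it is not valid RFC3339.
	bd, err := time.Parse(time.RFC3339, BuildDate)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Build Date '%s'\n", bd.Format(time.RFC3339))
}
```

For this sketch the equivalent of the GOLDFLAGS line would be `go build -ldflags "-X main.BuildDate=2022-06-01T13:18:45Z"`, mirroring how the script points -X at the consul version package's BuildDate.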

View file

@ -0,0 +1,72 @@
#!/bin/bash
readonly SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
readonly SCRIPT_DIR="$(dirname ${BASH_SOURCE[0]})"
readonly SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")"
readonly FN_DIR="$(dirname "${SCRIPT_DIR}")/functions"
source "${SCRIPT_DIR}/functions.sh"
function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]
Description:
This script uses the date of the last commit on the branch as the build date. This
is to make the date consistent across the various platforms we build on, even if they
start at different times. In practice this is the commit where the version string is set.
Options:
-s | --source DIR Path to source to build.
Defaults to "${SOURCE_DIR}"
EOF
}
function err_usage {
err "$1"
err ""
err "$(usage)"
}
function main {
declare sdir="${SOURCE_DIR}"
declare -i date=0
while test $# -gt 0
do
case "$1" in
-h | --help )
usage
return 0
;;
-s | --source )
if test -z "$2"
then
err_usage "ERROR: option -s/--source requires an argument"
return 1
fi
if ! test -d "$2"
then
err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
return 1
fi
sdir="$2"
shift 2
;;
*)
err_usage "ERROR: Unknown argument: '$1'"
return 1
;;
esac
done
git_date "${sdir}" || return 1
return 0
}
main "$@"
exit $?

View file

@ -27,12 +27,19 @@ import (
)
func New(ui cli.Ui) *cmd {
buildDate, err := time.Parse(time.RFC3339, consulversion.BuildDate)
if err != nil {
ui.Error(fmt.Sprintf("Fatal error with internal time set; check makefile for build date %v %v \n", buildDate, err))
return nil
}
c := &cmd{
ui: ui,
revision: consulversion.GitCommit,
version: consulversion.Version,
versionPrerelease: consulversion.VersionPrerelease,
versionHuman: consulversion.GetHumanVersion(),
buildDate: buildDate,
flags: flag.NewFlagSet("", flag.ContinueOnError),
}
config.AddFlags(c.flags, &c.configLoadOpts)
@ -53,6 +60,7 @@ type cmd struct {
version string
versionPrerelease string
versionHuman string
buildDate time.Time
configLoadOpts config.LoadOpts
logger hclog.InterceptLogger
}
@ -194,6 +202,10 @@ func (c *cmd) run(args []string) int {
segment = "<all>"
}
ui.Info(fmt.Sprintf(" Version: '%s'", c.versionHuman))
if strings.Contains(c.versionHuman, "dev") {
ui.Info(fmt.Sprintf(" Revision: '%s'", c.revision))
}
ui.Info(fmt.Sprintf(" Build Date: '%s'", c.buildDate))
ui.Info(fmt.Sprintf(" Node ID: '%s'", config.NodeID))
ui.Info(fmt.Sprintf(" Node name: '%s'", config.NodeName))
if ap := config.PartitionOrEmpty(); ap != "" {

View file

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"time"
)
const (
@ -43,6 +44,8 @@ func (_ *prettyFormatter) Format(info *VersionInfo) (string, error) {
buffer.WriteString(fmt.Sprintf("Revision %s\n", info.Revision))
}
buffer.WriteString(fmt.Sprintf("Build Date %s\n", info.BuildDate.Format(time.RFC3339)))
var supplement string
if info.RPC.Default < info.RPC.Max {
supplement = fmt.Sprintf(" (agent will automatically use protocol >%d when speaking to compatible agents)",

View file

@ -6,6 +6,7 @@ import (
"io/ioutil"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
)
@ -31,11 +32,13 @@ func golden(t *testing.T, name, got string) string {
}
func TestFormat(t *testing.T) {
buildDate, _ := time.Parse(time.RFC3339, "2022-06-01T13:18:45Z")
info := VersionInfo{
HumanVersion: "1.99.3-beta1",
Version: "1.99.3",
Prerelease: "beta1",
Revision: "5e5dbedd47a5f875b60e241c5555a9caab595246",
BuildDate: buildDate,
RPC: RPCVersionInfo{
Default: 2,
Min: 1,

View file

@ -2,6 +2,7 @@
"Version": "1.99.3",
"Revision": "5e5dbedd47a5f875b60e241c5555a9caab595246",
"Prerelease": "beta1",
"BuildDate": "2022-06-01T13:18:45Z",
"RPC": {
"Default": 2,
"Min": 1,

View file

@ -1,3 +1,4 @@
Consul v1.99.3-beta1
Revision 5e5dbedd47a5f875b60e241c5555a9caab595246
Build Date 2022-06-01T13:18:45Z
Protocol 2 spoken by default, understands 1 to 3 (agent will automatically use protocol >2 when speaking to compatible agents)

View file

@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/command/flags"
@ -46,6 +47,7 @@ type VersionInfo struct {
Version string
Revision string
Prerelease string
BuildDate time.Time
RPC RPCVersionInfo
}
@ -59,11 +61,20 @@ func (c *cmd) Run(args []string) int {
c.UI.Error(err.Error())
return 1
}
// We parse this here because consul version is used in our 'smoke' tests and we want to fail early
buildDate, err := time.Parse(time.RFC3339, version.BuildDate)
if err != nil {
c.UI.Error(err.Error())
return 1
}
out, err := formatter.Format(&VersionInfo{
HumanVersion: version.GetHumanVersion(),
Version: version.Version,
Revision: version.GitCommit,
Prerelease: version.VersionPrerelease,
BuildDate: buildDate,
RPC: RPCVersionInfo{
Default: consul.DefaultRPCProtocol,
Min: int(consul.ProtocolVersionMin),

View file

@ -4,6 +4,36 @@ package pbpeering
import "github.com/hashicorp/consul/api"
func EstablishRequestToAPI(s *EstablishRequest, t *api.PeeringEstablishRequest) {
if s == nil {
return
}
t.PeerName = s.PeerName
t.PeeringToken = s.PeeringToken
t.Datacenter = s.Datacenter
t.Token = s.Token
t.Meta = s.Meta
}
func EstablishRequestFromAPI(t *api.PeeringEstablishRequest, s *EstablishRequest) {
if s == nil {
return
}
s.PeerName = t.PeerName
s.PeeringToken = t.PeeringToken
s.Datacenter = t.Datacenter
s.Token = t.Token
s.Meta = t.Meta
}
func EstablishResponseToAPI(s *EstablishResponse, t *api.PeeringEstablishResponse) {
if s == nil {
return
}
}
func EstablishResponseFromAPI(t *api.PeeringEstablishResponse, s *EstablishResponse) {
if s == nil {
return
}
}
func GenerateTokenRequestToAPI(s *GenerateTokenRequest, t *api.PeeringGenerateTokenRequest) {
if s == nil {
return
@ -36,36 +66,6 @@ func GenerateTokenResponseFromAPI(t *api.PeeringGenerateTokenResponse, s *Genera
}
s.PeeringToken = t.PeeringToken
}
func InitiateRequestToAPI(s *InitiateRequest, t *api.PeeringInitiateRequest) {
if s == nil {
return
}
t.PeerName = s.PeerName
t.PeeringToken = s.PeeringToken
t.Datacenter = s.Datacenter
t.Token = s.Token
t.Meta = s.Meta
}
func InitiateRequestFromAPI(t *api.PeeringInitiateRequest, s *InitiateRequest) {
if s == nil {
return
}
s.PeerName = t.PeerName
s.PeeringToken = t.PeeringToken
s.Datacenter = t.Datacenter
s.Token = t.Token
s.Meta = t.Meta
}
func InitiateResponseToAPI(s *InitiateResponse, t *api.PeeringInitiateResponse) {
if s == nil {
return
}
}
func InitiateResponseFromAPI(t *api.PeeringInitiateResponse, s *InitiateResponse) {
if s == nil {
return
}
}
func PeeringToAPI(s *Peering, t *api.Peering) {
if s == nil {
return
@ -73,6 +73,7 @@ func PeeringToAPI(s *Peering, t *api.Peering) {
t.ID = s.ID
t.Name = s.Name
t.Partition = s.Partition
t.DeletedAt = TimePtrFromProto(s.DeletedAt)
t.Meta = s.Meta
t.State = PeeringStateToAPI(s.State)
t.PeerID = s.PeerID
@ -89,6 +90,7 @@ func PeeringFromAPI(t *api.Peering, s *Peering) {
s.ID = t.ID
s.Name = t.Name
s.Partition = t.Partition
s.DeletedAt = TimePtrToProto(t.DeletedAt)
s.Meta = t.Meta
s.State = PeeringStateFromAPI(t.State)
s.PeerID = t.PeerID

View file

@ -4,9 +4,11 @@ import (
"strconv"
"time"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/mitchellh/hashstructure"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
)
@ -52,32 +54,32 @@ func (msg *GenerateTokenRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryT
}
// IsRead implements structs.RPCInfo
func (req *InitiateRequest) IsRead() bool {
func (req *EstablishRequest) IsRead() bool {
return false
}
// AllowStaleRead implements structs.RPCInfo
func (req *InitiateRequest) AllowStaleRead() bool {
func (req *EstablishRequest) AllowStaleRead() bool {
return false
}
// TokenSecret implements structs.RPCInfo
func (req *InitiateRequest) TokenSecret() string {
func (req *EstablishRequest) TokenSecret() string {
return req.Token
}
// SetTokenSecret implements structs.RPCInfo
func (req *InitiateRequest) SetTokenSecret(token string) {
func (req *EstablishRequest) SetTokenSecret(token string) {
req.Token = token
}
// HasTimedOut implements structs.RPCInfo
func (req *InitiateRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) {
func (req *EstablishRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) {
return time.Since(start) > rpcHoldTimeout, nil
}
// Timeout implements structs.RPCInfo
func (msg *InitiateRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration {
func (msg *EstablishRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration {
return rpcHoldTimeout
}
@ -86,7 +88,7 @@ func (msg *InitiateRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime t
// If we generated a token for this peer we did not store our server addresses under PeerServerAddresses.
// These server addresses are for dialing, and only the peer initiating the peering will do the dialing.
func (p *Peering) ShouldDial() bool {
return len(p.PeerServerAddresses) > 0 && p.State != PeeringState_TERMINATED
return len(p.PeerServerAddresses) > 0
}
func (x ReplicationMessage_Response_Operation) GoString() string {
@ -175,6 +177,18 @@ func PeeringStateFromAPI(t api.PeeringState) PeeringState {
}
}
func (p *Peering) IsActive() bool {
if p != nil && p.State == PeeringState_TERMINATED {
return false
}
if p == nil || p.DeletedAt == nil {
return true
}
// The minimum protobuf timestamp is the Unix epoch rather than Go's zero time.
return structs.IsZeroProtoTime(p.DeletedAt)
}
func (p *Peering) ToAPI() *api.Peering {
var t api.Peering
PeeringToAPI(p, &t)
@ -198,9 +212,9 @@ func (resp *GenerateTokenResponse) ToAPI() *api.PeeringGenerateTokenResponse {
}
// TODO consider using mog for this
func (resp *InitiateResponse) ToAPI() *api.PeeringInitiateResponse {
var t api.PeeringInitiateResponse
InitiateResponseToAPI(resp, &t)
func (resp *EstablishResponse) ToAPI() *api.PeeringEstablishResponse {
var t api.PeeringEstablishResponse
EstablishResponseToAPI(resp, &t)
return &t
}
@ -215,12 +229,12 @@ func NewGenerateTokenRequestFromAPI(req *api.PeeringGenerateTokenRequest) *Gener
}
// convenience
func NewInitiateRequestFromAPI(req *api.PeeringInitiateRequest) *InitiateRequest {
func NewEstablishRequestFromAPI(req *api.PeeringEstablishRequest) *EstablishRequest {
if req == nil {
return nil
}
t := &InitiateRequest{}
InitiateRequestFromAPI(req, t)
t := &EstablishRequest{}
EstablishRequestFromAPI(req, t)
return t
}
@ -252,3 +266,18 @@ func (r *TrustBundleListByServiceRequest) CacheInfo() cache.RequestInfo {
return info
}
func TimePtrFromProto(s *timestamp.Timestamp) *time.Time {
if s == nil {
return nil
}
t := structs.TimeFromProto(s)
return &t
}
func TimePtrToProto(s *time.Time) *timestamp.Timestamp {
if s == nil {
return nil
}
return structs.TimeToProto(*s)
}
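
The DeletedAt plumbing above gives peerings two-phase deletion: IsActive treats a peering as live unless it is TERMINATED or carries a non-zero DeletedAt, which is also why the API test earlier retries the Read until the record disappears. A minimal sketch of those semantics, assuming the pbpeering import path used in this diff:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func main() {
	p := &pbpeering.Peering{Name: "peer1"}
	fmt.Println(p.IsActive()) // true: no DeletedAt set, not TERMINATED

	// Marking the peering for deletion flips it to inactive; the actual
	// cleanup happens asynchronously, hence the retry around Read above.
	p.DeletedAt = timestamppb.New(time.Now())
	fmt.Println(p.IsActive()) // false: DeletedAt is set and non-zero
}
```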

View file

@ -228,22 +228,22 @@ func (msg *GenerateTokenResponse) UnmarshalBinary(b []byte) error {
}
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *InitiateRequest) MarshalBinary() ([]byte, error) {
func (msg *EstablishRequest) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *InitiateRequest) UnmarshalBinary(b []byte) error {
func (msg *EstablishRequest) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *InitiateResponse) MarshalBinary() ([]byte, error) {
func (msg *EstablishResponse) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *InitiateResponse) UnmarshalBinary(b []byte) error {
func (msg *EstablishResponse) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}

View file

@ -11,6 +11,7 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
@ -159,26 +160,30 @@ type Peering struct {
Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
// Partition is the local partition connecting to the peer.
Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"`
// DeletedAt is the time when the Peering was marked for deletion
// This is nullable so that we can omit if empty when encoding in JSON
// mog: func-to=TimePtrFromProto func-from=TimePtrToProto
DeletedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=DeletedAt,proto3" json:"DeletedAt,omitempty"`
// Meta is a mapping of some string value to any other string value
Meta map[string]string `protobuf:"bytes,11,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Meta map[string]string `protobuf:"bytes,5,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// State is one of the valid PeeringState values to represent the status of
// peering relationship.
//
// mog: func-to=PeeringStateToAPI func-from=PeeringStateFromAPI
State PeeringState `protobuf:"varint,4,opt,name=State,proto3,enum=peering.PeeringState" json:"State,omitempty"`
State PeeringState `protobuf:"varint,6,opt,name=State,proto3,enum=peering.PeeringState" json:"State,omitempty"`
// PeerID is the ID that our peer assigned to this peering.
// This ID is to be used when dialing the peer, so that it can know who dialed it.
PeerID string `protobuf:"bytes,5,opt,name=PeerID,proto3" json:"PeerID,omitempty"`
PeerID string `protobuf:"bytes,7,opt,name=PeerID,proto3" json:"PeerID,omitempty"`
// PeerCAPems contains all the CA certificates for the remote peer.
PeerCAPems []string `protobuf:"bytes,6,rep,name=PeerCAPems,proto3" json:"PeerCAPems,omitempty"`
PeerCAPems []string `protobuf:"bytes,8,rep,name=PeerCAPems,proto3" json:"PeerCAPems,omitempty"`
// PeerServerName is the name of the remote server as it relates to TLS.
PeerServerName string `protobuf:"bytes,7,opt,name=PeerServerName,proto3" json:"PeerServerName,omitempty"`
PeerServerName string `protobuf:"bytes,9,opt,name=PeerServerName,proto3" json:"PeerServerName,omitempty"`
// PeerServerAddresses contains all the connection addresses for the remote peer.
PeerServerAddresses []string `protobuf:"bytes,8,rep,name=PeerServerAddresses,proto3" json:"PeerServerAddresses,omitempty"`
PeerServerAddresses []string `protobuf:"bytes,10,rep,name=PeerServerAddresses,proto3" json:"PeerServerAddresses,omitempty"`
// CreateIndex is the Raft index at which the Peering was created.
CreateIndex uint64 `protobuf:"varint,9,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"`
CreateIndex uint64 `protobuf:"varint,11,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"`
// ModifyIndex is the latest Raft index at which the Peering was modified.
ModifyIndex uint64 `protobuf:"varint,10,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"`
ModifyIndex uint64 `protobuf:"varint,12,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"`
}
func (x *Peering) Reset() {
@ -234,6 +239,13 @@ func (x *Peering) GetPartition() string {
return ""
}
func (x *Peering) GetDeletedAt() *timestamppb.Timestamp {
if x != nil {
return x.DeletedAt
}
return nil
}
func (x *Peering) GetMeta() map[string]string {
if x != nil {
return x.Meta
@ -300,14 +312,17 @@ type PeeringTrustBundle struct {
TrustDomain string `protobuf:"bytes,1,opt,name=TrustDomain,proto3" json:"TrustDomain,omitempty"`
// PeerName associates the trust bundle with a peer.
PeerName string `protobuf:"bytes,2,opt,name=PeerName,proto3" json:"PeerName,omitempty"`
// Partition isolates the bundle from other trust bundles in separate partitions.
// Partition isolates the bundle from other trust bundles in separate local partitions.
Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"`
// RootPEMs holds ASN.1 DER encoded X.509 certificate data for the trust bundle.
RootPEMs []string `protobuf:"bytes,4,rep,name=RootPEMs,proto3" json:"RootPEMs,omitempty"`
// ExportedPartition references the remote partition of the peer
// which sent this trust bundle. Used for generating SpiffeIDs.
ExportedPartition string `protobuf:"bytes,5,opt,name=ExportedPartition,proto3" json:"ExportedPartition,omitempty"`
// CreateIndex is the Raft index at which the trust bundle was created.
CreateIndex uint64 `protobuf:"varint,5,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"`
CreateIndex uint64 `protobuf:"varint,6,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"`
// ModifyIndex is the latest Raft index at which the trust bundle was modified.
ModifyIndex uint64 `protobuf:"varint,6,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"`
ModifyIndex uint64 `protobuf:"varint,7,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"`
}
func (x *PeeringTrustBundle) Reset() {
@ -370,6 +385,13 @@ func (x *PeeringTrustBundle) GetRootPEMs() []string {
return nil
}
func (x *PeeringTrustBundle) GetExportedPartition() string {
if x != nil {
return x.ExportedPartition
}
return ""
}
func (x *PeeringTrustBundle) GetCreateIndex() uint64 {
if x != nil {
return x.CreateIndex
@ -1486,10 +1508,10 @@ func (x *GenerateTokenResponse) GetPeeringToken() string {
//
// mog annotation:
//
// target=github.com/hashicorp/consul/api.PeeringInitiateRequest
// target=github.com/hashicorp/consul/api.PeeringEstablishRequest
// output=peering.gen.go
// name=API
type InitiateRequest struct {
type EstablishRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@ -1508,8 +1530,8 @@ type InitiateRequest struct {
Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *InitiateRequest) Reset() {
*x = InitiateRequest{}
func (x *EstablishRequest) Reset() {
*x = EstablishRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_pbpeering_peering_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@ -1517,13 +1539,13 @@ func (x *InitiateRequest) Reset() {
}
}
func (x *InitiateRequest) String() string {
func (x *EstablishRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*InitiateRequest) ProtoMessage() {}
func (*EstablishRequest) ProtoMessage() {}
func (x *InitiateRequest) ProtoReflect() protoreflect.Message {
func (x *EstablishRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_pbpeering_peering_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@ -1535,47 +1557,47 @@ func (x *InitiateRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
// Deprecated: Use InitiateRequest.ProtoReflect.Descriptor instead.
func (*InitiateRequest) Descriptor() ([]byte, []int) {
// Deprecated: Use EstablishRequest.ProtoReflect.Descriptor instead.
func (*EstablishRequest) Descriptor() ([]byte, []int) {
return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{22}
}
func (x *InitiateRequest) GetPeerName() string {
func (x *EstablishRequest) GetPeerName() string {
if x != nil {
return x.PeerName
}
return ""
}
func (x *InitiateRequest) GetPeeringToken() string {
func (x *EstablishRequest) GetPeeringToken() string {
if x != nil {
return x.PeeringToken
}
return ""
}
func (x *InitiateRequest) GetPartition() string {
func (x *EstablishRequest) GetPartition() string {
if x != nil {
return x.Partition
}
return ""
}
func (x *InitiateRequest) GetDatacenter() string {
func (x *EstablishRequest) GetDatacenter() string {
if x != nil {
return x.Datacenter
}
return ""
}
func (x *InitiateRequest) GetToken() string {
func (x *EstablishRequest) GetToken() string {
if x != nil {
return x.Token
}
return ""
}
func (x *InitiateRequest) GetMeta() map[string]string {
func (x *EstablishRequest) GetMeta() map[string]string {
if x != nil {
return x.Meta
}
@ -1585,17 +1607,17 @@ func (x *InitiateRequest) GetMeta() map[string]string {
//
// mog annotation:
//
// target=github.com/hashicorp/consul/api.PeeringInitiateResponse
// target=github.com/hashicorp/consul/api.PeeringEstablishResponse
// output=peering.gen.go
// name=API
type InitiateResponse struct {
type EstablishResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *InitiateResponse) Reset() {
*x = InitiateResponse{}
func (x *EstablishResponse) Reset() {
*x = EstablishResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_pbpeering_peering_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@ -1603,13 +1625,13 @@ func (x *InitiateResponse) Reset() {
}
}
func (x *InitiateResponse) String() string {
func (x *EstablishResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*InitiateResponse) ProtoMessage() {}
func (*EstablishResponse) ProtoMessage() {}
func (x *InitiateResponse) ProtoReflect() protoreflect.Message {
func (x *EstablishResponse) ProtoReflect() protoreflect.Message {
mi := &file_proto_pbpeering_peering_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@ -1621,8 +1643,8 @@ func (x *InitiateResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
// Deprecated: Use InitiateResponse.ProtoReflect.Descriptor instead.
func (*InitiateResponse) Descriptor() ([]byte, []int) {
// Deprecated: Use EstablishResponse.ProtoReflect.Descriptor instead.
func (*EstablishResponse) Descriptor() ([]byte, []int) {
return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{23}
}
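
```go
// The renamed messages line up with the PeeringService.Establish RPC defined in
// the descriptor below. This is a hedged sketch of calling it through a generated
// gRPC client: NewPeeringServiceClient and the unary call signature follow
// standard protoc-gen-go-grpc conventions and are assumptions, not shown in this
// diff. The address below is Consul's default gRPC port and is illustrative only.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:8502", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pbpeering.NewPeeringServiceClient(conn)
	resp, err := client.Establish(context.Background(), &pbpeering.EstablishRequest{
		PeerName:     "cluster-01",
		PeeringToken: "<token from GenerateToken>",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = resp // EstablishResponse currently carries no fields
}
```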
@ -1981,295 +2003,303 @@ var File_proto_pbpeering_peering_proto protoreflect.FileDescriptor
var file_proto_pbpeering_peering_proto_rawDesc = []byte{
0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x07, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x74, 0x61,
0x74, 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0xb7, 0x03, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02,
0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04,
0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e,
0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x4d,
0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2b,
0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e,
0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53,
0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50,
0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65,
0x72, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x41, 0x50, 0x65, 0x6d,
0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x41, 0x50,
0x65, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65,
0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x50, 0x65, 0x65,
0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x13, 0x50,
0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65,
0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x20, 0x0a,
0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0a,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x50,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c,
0x65, 0x12, 0x20, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d,
0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a,
0x08, 0x52, 0x6f, 0x6f, 0x74, 0x50, 0x45, 0x4d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
0x08, 0x52, 0x6f, 0x6f, 0x74, 0x50, 0x45, 0x4d, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65,
0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d,
0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04,
0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x66, 0x0a,
0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75,
0x07, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x74,
0x61, 0x74, 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0xf1, 0x03, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x0e, 0x0a,
0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a,
0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d,
0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x38, 0x0a, 0x09, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x4d, 0x65, 0x74,
0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2b, 0x0a, 0x05, 0x53, 0x74, 0x61,
0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52,
0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44,
0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1e,
0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x41, 0x50, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x03,
0x28, 0x09, 0x52, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x41, 0x50, 0x65, 0x6d, 0x73, 0x12, 0x26,
0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65,
0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76,
0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65,
0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20,
0x03, 0x28, 0x09, 0x52, 0x13, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f,
0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52,
0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x37, 0x0a, 0x09,
0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b,
0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a,
0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61,
0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50,
0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x52, 0x6f, 0x6f, 0x74,
0x50, 0x45, 0x4d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x52, 0x6f, 0x6f, 0x74,
0x50, 0x45, 0x4d, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64,
0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
0x11, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e,
0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66,
0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x66, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e,
0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x41,
0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x22, 0x52, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74,
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x43, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xd6, 0x01, 0x0a, 0x13, 0x50,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x2a, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e,
0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x3a,
0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72,
0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65,
0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
0x02, 0x38, 0x01, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72,
0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x14, 0x50,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74,
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x41, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e,
0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52,
0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x52, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72,
0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a,
0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x43, 0x0a, 0x13,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18,
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x73, 0x22, 0xd6, 0x01, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69,
0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x07, 0x50, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74,
0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x68, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c,
0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a,
0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x17, 0x0a, 0x15,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9f, 0x01, 0x0a, 0x1f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42,
0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x4e,
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72,
0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61,
0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74,
0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x20, 0x54, 0x72, 0x75, 0x73, 0x74,
0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x12, 0x35, 0x0a, 0x07, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52,
0x07, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x22, 0x6a, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73,
0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x22, 0x64, 0x0a, 0x17, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e,
0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x33, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9f,
0x01, 0x0a, 0x1f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69,
0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
0x22, 0x6f, 0x0a, 0x20, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c,
0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20,
0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x35, 0x0a, 0x07, 0x42, 0x75,
0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75,
0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x07, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65,
0x73, 0x22, 0x6a, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65,
0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a,
0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x64, 0x0a,
0x17, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x33,
0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x06, 0x42, 0x75, 0x6e,
0x64, 0x6c, 0x65, 0x22, 0x2d, 0x0a, 0x1b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65,
0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
0x49, 0x44, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72,
0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x1e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72,
0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72,
0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x12,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64,
0x6c, 0x65, 0x52, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x2d, 0x0a, 0x1b, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79,
0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79, 0x49,
0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x1e, 0x50, 0x65,
0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75,
0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x73, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09,
0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61,
0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65,
0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x12,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64,
0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42,
0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72,
0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74,
0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44,
0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57,
0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x73, 0x0a, 0x1f,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64,
0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfc,
0x01, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e,
0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73,
0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfc, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61,
0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50,
0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61,
0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3b,
0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54,
0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d,
0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x3a, 0x02, 0x38, 0x01, 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a,
0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x22, 0x96, 0x02, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d,
0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x04, 0x4d, 0x65, 0x74,
0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74,
0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x12, 0x0a, 0x10, 0x49, 0x6e,
0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x94,
0x05, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00,
0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0a, 0x74, 0x65,
0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d,
0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e,
0x61, 0x74, 0x65, 0x64, 0x1a, 0x7f, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a,
0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12,
0x24, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05,
0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x94, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x08, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x09,
0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x2e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x30, 0x0a, 0x09, 0x4f, 0x70,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f,
0x77, 0x6e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01,
0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x1a, 0x0c, 0x0a, 0x0a,
0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x50, 0x61,
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x2a, 0x53, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65,
0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12,
0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c,
0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41,
0x54, 0x45, 0x44, 0x10, 0x04, 0x32, 0xea, 0x05, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65,
0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72,
0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74,
0x69, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x49,
0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74,
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x50, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69,
0x73, 0x74, 0x12, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a,
0x0d, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44,
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a,
0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e,
0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57,
0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69,
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x54, 0x72,
0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74,
0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x29, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74,
0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x54,
0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x12, 0x1f,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75,
0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x20, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42,
0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x12, 0x4f, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52,
0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3b, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18,
0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04,
0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3b, 0x0a,
0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x98, 0x02, 0x0a, 0x10, 0x45,
0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a,
0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f,
0x6b, 0x65, 0x6e, 0x12, 0x37, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x23, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61,
0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74,
0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09,
0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x13, 0x0a, 0x11, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69,
0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x94, 0x05, 0x0a, 0x12, 0x52,
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x1a, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01,
0x30, 0x01, 0x42, 0x84, 0x01, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x42, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68,
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xa2,
0x02, 0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xca,
0x02, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xe2, 0x02, 0x13, 0x50, 0x65, 0x65, 0x72,
0x69, 0x6e, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea,
0x02, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
0x65, 0x12, 0x3f, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70,
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52,
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e,
0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74,
0x65, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64,
0x1a, 0x7f, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x50,
0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65,
0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x24, 0x0a, 0x05, 0x45,
0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x74, 0x61,
0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f,
0x72, 0x1a, 0x94, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14,
0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e,
0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x52, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x70, 0x65,
0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x30, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00,
0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x1a, 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d,
0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61,
0x64, 0x22, 0x29, 0x0a, 0x0d, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x53, 0x0a, 0x0c,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x0a, 0x09,
0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49,
0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49,
0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10,
0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10,
0x04, 0x32, 0xed, 0x05, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73,
0x68, 0x12, 0x19, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61,
0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72,
0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73,
0x74, 0x12, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72,
0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67,
0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, 0x2e,
0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44,
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c,
0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x70,
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72,
0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x65, 0x65,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74,
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x54, 0x72, 0x75,
0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42,
0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x29, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42,
0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x54, 0x72,
0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x12, 0x1f, 0x2e,
0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e,
0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20,
0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75,
0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x4f, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x1a, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30,
0x01, 0x42, 0x84, 0x01, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e,
0x67, 0x42, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61,
0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xa2, 0x02,
0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xca, 0x02,
0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xe2, 0x02, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69,
0x6e, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -2311,61 +2341,63 @@ var file_proto_pbpeering_peering_proto_goTypes = []interface{}{
(*PeeringTrustBundleDeleteResponse)(nil), // 21: peering.PeeringTrustBundleDeleteResponse
(*GenerateTokenRequest)(nil), // 22: peering.GenerateTokenRequest
(*GenerateTokenResponse)(nil), // 23: peering.GenerateTokenResponse
(*InitiateRequest)(nil), // 24: peering.InitiateRequest
(*InitiateResponse)(nil), // 25: peering.InitiateResponse
(*EstablishRequest)(nil), // 24: peering.EstablishRequest
(*EstablishResponse)(nil), // 25: peering.EstablishResponse
(*ReplicationMessage)(nil), // 26: peering.ReplicationMessage
(*LeaderAddress)(nil), // 27: peering.LeaderAddress
nil, // 28: peering.Peering.MetaEntry
nil, // 29: peering.PeeringWriteRequest.MetaEntry
nil, // 30: peering.GenerateTokenRequest.MetaEntry
nil, // 31: peering.InitiateRequest.MetaEntry
nil, // 31: peering.EstablishRequest.MetaEntry
(*ReplicationMessage_Request)(nil), // 32: peering.ReplicationMessage.Request
(*ReplicationMessage_Response)(nil), // 33: peering.ReplicationMessage.Response
(*ReplicationMessage_Terminated)(nil), // 34: peering.ReplicationMessage.Terminated
(*pbstatus.Status)(nil), // 35: status.Status
(*anypb.Any)(nil), // 36: google.protobuf.Any
(*timestamppb.Timestamp)(nil), // 35: google.protobuf.Timestamp
(*pbstatus.Status)(nil), // 36: status.Status
(*anypb.Any)(nil), // 37: google.protobuf.Any
}
var file_proto_pbpeering_peering_proto_depIdxs = []int32{
28, // 0: peering.Peering.Meta:type_name -> peering.Peering.MetaEntry
0, // 1: peering.Peering.State:type_name -> peering.PeeringState
2, // 2: peering.PeeringReadResponse.Peering:type_name -> peering.Peering
2, // 3: peering.PeeringListResponse.Peerings:type_name -> peering.Peering
2, // 4: peering.PeeringWriteRequest.Peering:type_name -> peering.Peering
29, // 5: peering.PeeringWriteRequest.Meta:type_name -> peering.PeeringWriteRequest.MetaEntry
3, // 6: peering.TrustBundleListByServiceResponse.Bundles:type_name -> peering.PeeringTrustBundle
3, // 7: peering.TrustBundleReadResponse.Bundle:type_name -> peering.PeeringTrustBundle
3, // 8: peering.PeeringTrustBundleWriteRequest.PeeringTrustBundle:type_name -> peering.PeeringTrustBundle
30, // 9: peering.GenerateTokenRequest.Meta:type_name -> peering.GenerateTokenRequest.MetaEntry
31, // 10: peering.InitiateRequest.Meta:type_name -> peering.InitiateRequest.MetaEntry
32, // 11: peering.ReplicationMessage.request:type_name -> peering.ReplicationMessage.Request
33, // 12: peering.ReplicationMessage.response:type_name -> peering.ReplicationMessage.Response
34, // 13: peering.ReplicationMessage.terminated:type_name -> peering.ReplicationMessage.Terminated
35, // 14: peering.ReplicationMessage.Request.Error:type_name -> status.Status
36, // 15: peering.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any
1, // 16: peering.ReplicationMessage.Response.operation:type_name -> peering.ReplicationMessage.Response.Operation
22, // 17: peering.PeeringService.GenerateToken:input_type -> peering.GenerateTokenRequest
24, // 18: peering.PeeringService.Initiate:input_type -> peering.InitiateRequest
4, // 19: peering.PeeringService.PeeringRead:input_type -> peering.PeeringReadRequest
6, // 20: peering.PeeringService.PeeringList:input_type -> peering.PeeringListRequest
10, // 21: peering.PeeringService.PeeringDelete:input_type -> peering.PeeringDeleteRequest
8, // 22: peering.PeeringService.PeeringWrite:input_type -> peering.PeeringWriteRequest
12, // 23: peering.PeeringService.TrustBundleListByService:input_type -> peering.TrustBundleListByServiceRequest
14, // 24: peering.PeeringService.TrustBundleRead:input_type -> peering.TrustBundleReadRequest
26, // 25: peering.PeeringService.StreamResources:input_type -> peering.ReplicationMessage
23, // 26: peering.PeeringService.GenerateToken:output_type -> peering.GenerateTokenResponse
25, // 27: peering.PeeringService.Initiate:output_type -> peering.InitiateResponse
5, // 28: peering.PeeringService.PeeringRead:output_type -> peering.PeeringReadResponse
7, // 29: peering.PeeringService.PeeringList:output_type -> peering.PeeringListResponse
11, // 30: peering.PeeringService.PeeringDelete:output_type -> peering.PeeringDeleteResponse
9, // 31: peering.PeeringService.PeeringWrite:output_type -> peering.PeeringWriteResponse
13, // 32: peering.PeeringService.TrustBundleListByService:output_type -> peering.TrustBundleListByServiceResponse
15, // 33: peering.PeeringService.TrustBundleRead:output_type -> peering.TrustBundleReadResponse
26, // 34: peering.PeeringService.StreamResources:output_type -> peering.ReplicationMessage
26, // [26:35] is the sub-list for method output_type
17, // [17:26] is the sub-list for method input_type
17, // [17:17] is the sub-list for extension type_name
17, // [17:17] is the sub-list for extension extendee
0, // [0:17] is the sub-list for field type_name
35, // 0: peering.Peering.DeletedAt:type_name -> google.protobuf.Timestamp
28, // 1: peering.Peering.Meta:type_name -> peering.Peering.MetaEntry
0, // 2: peering.Peering.State:type_name -> peering.PeeringState
2, // 3: peering.PeeringReadResponse.Peering:type_name -> peering.Peering
2, // 4: peering.PeeringListResponse.Peerings:type_name -> peering.Peering
2, // 5: peering.PeeringWriteRequest.Peering:type_name -> peering.Peering
29, // 6: peering.PeeringWriteRequest.Meta:type_name -> peering.PeeringWriteRequest.MetaEntry
3, // 7: peering.TrustBundleListByServiceResponse.Bundles:type_name -> peering.PeeringTrustBundle
3, // 8: peering.TrustBundleReadResponse.Bundle:type_name -> peering.PeeringTrustBundle
3, // 9: peering.PeeringTrustBundleWriteRequest.PeeringTrustBundle:type_name -> peering.PeeringTrustBundle
30, // 10: peering.GenerateTokenRequest.Meta:type_name -> peering.GenerateTokenRequest.MetaEntry
31, // 11: peering.EstablishRequest.Meta:type_name -> peering.EstablishRequest.MetaEntry
32, // 12: peering.ReplicationMessage.request:type_name -> peering.ReplicationMessage.Request
33, // 13: peering.ReplicationMessage.response:type_name -> peering.ReplicationMessage.Response
34, // 14: peering.ReplicationMessage.terminated:type_name -> peering.ReplicationMessage.Terminated
36, // 15: peering.ReplicationMessage.Request.Error:type_name -> status.Status
37, // 16: peering.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any
1, // 17: peering.ReplicationMessage.Response.operation:type_name -> peering.ReplicationMessage.Response.Operation
22, // 18: peering.PeeringService.GenerateToken:input_type -> peering.GenerateTokenRequest
24, // 19: peering.PeeringService.Establish:input_type -> peering.EstablishRequest
4, // 20: peering.PeeringService.PeeringRead:input_type -> peering.PeeringReadRequest
6, // 21: peering.PeeringService.PeeringList:input_type -> peering.PeeringListRequest
10, // 22: peering.PeeringService.PeeringDelete:input_type -> peering.PeeringDeleteRequest
8, // 23: peering.PeeringService.PeeringWrite:input_type -> peering.PeeringWriteRequest
12, // 24: peering.PeeringService.TrustBundleListByService:input_type -> peering.TrustBundleListByServiceRequest
14, // 25: peering.PeeringService.TrustBundleRead:input_type -> peering.TrustBundleReadRequest
26, // 26: peering.PeeringService.StreamResources:input_type -> peering.ReplicationMessage
23, // 27: peering.PeeringService.GenerateToken:output_type -> peering.GenerateTokenResponse
25, // 28: peering.PeeringService.Establish:output_type -> peering.EstablishResponse
5, // 29: peering.PeeringService.PeeringRead:output_type -> peering.PeeringReadResponse
7, // 30: peering.PeeringService.PeeringList:output_type -> peering.PeeringListResponse
11, // 31: peering.PeeringService.PeeringDelete:output_type -> peering.PeeringDeleteResponse
9, // 32: peering.PeeringService.PeeringWrite:output_type -> peering.PeeringWriteResponse
13, // 33: peering.PeeringService.TrustBundleListByService:output_type -> peering.TrustBundleListByServiceResponse
15, // 34: peering.PeeringService.TrustBundleRead:output_type -> peering.TrustBundleReadResponse
26, // 35: peering.PeeringService.StreamResources:output_type -> peering.ReplicationMessage
27, // [27:36] is the sub-list for method output_type
18, // [18:27] is the sub-list for method input_type
18, // [18:18] is the sub-list for extension type_name
18, // [18:18] is the sub-list for extension extendee
0, // [0:18] is the sub-list for field type_name
}
func init() { file_proto_pbpeering_peering_proto_init() }
@ -2639,7 +2671,7 @@ func file_proto_pbpeering_peering_proto_init() {
}
}
file_proto_pbpeering_peering_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*InitiateRequest); i {
switch v := v.(*EstablishRequest); i {
case 0:
return &v.state
case 1:
@ -2651,7 +2683,7 @@ func file_proto_pbpeering_peering_proto_init() {
}
}
file_proto_pbpeering_peering_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*InitiateResponse); i {
switch v := v.(*EstablishResponse); i {
case 0:
return &v.state
case 1:

View file

@ -2,6 +2,7 @@ syntax = "proto3";
package peering;
import "google/protobuf/timestamp.proto";
import "google/protobuf/any.proto";
// TODO(peering): Handle this some other way
import "proto/pbstatus/status.proto";
@ -10,7 +11,7 @@ import "proto/pbstatus/status.proto";
// between disparate Consul clusters.
service PeeringService {
rpc GenerateToken(GenerateTokenRequest) returns (GenerateTokenResponse);
rpc Initiate(InitiateRequest) returns (InitiateResponse);
rpc Establish(EstablishRequest) returns (EstablishResponse);
rpc PeeringRead(PeeringReadRequest) returns (PeeringReadResponse);
rpc PeeringList(PeeringListRequest) returns (PeeringListResponse);
rpc PeeringDelete(PeeringDeleteRequest) returns (PeeringDeleteResponse);
@ -71,33 +72,38 @@ message Peering {
// Partition is the local partition connecting to the peer.
string Partition = 3;
// DeletedAt is the time when the Peering was marked for deletion.
// This is nullable so that we can omit it if empty when encoding in JSON
// mog: func-to=TimePtrFromProto func-from=TimePtrToProto
google.protobuf.Timestamp DeletedAt = 4;
// Meta is a mapping of some string value to any other string value
map<string, string> Meta = 11;
map<string, string> Meta = 5;
// State is one of the valid PeeringState values to represent the status of
// peering relationship.
//
// mog: func-to=PeeringStateToAPI func-from=PeeringStateFromAPI
PeeringState State = 4;
PeeringState State = 6;
// PeerID is the ID that our peer assigned to this peering.
// This ID is to be used when dialing the peer, so that it can know who dialed it.
string PeerID = 5;
string PeerID = 7;
// PeerCAPems contains all the CA certificates for the remote peer.
repeated string PeerCAPems = 6;
repeated string PeerCAPems = 8;
// PeerServerName is the name of the remote server as it relates to TLS.
string PeerServerName = 7;
string PeerServerName = 9;
// PeerServerAddresses contains all the connection addresses for the remote peer.
repeated string PeerServerAddresses = 8;
repeated string PeerServerAddresses = 10;
// CreateIndex is the Raft index at which the Peering was created.
uint64 CreateIndex = 9;
uint64 CreateIndex = 11;
// ModifyIndex is the latest Raft index at which the Peering was modified.
uint64 ModifyIndex = 10;
uint64 ModifyIndex = 12;
}
// PeeringTrustBundle holds the trust information for validating requests from a peer.
@ -108,17 +114,21 @@ message PeeringTrustBundle {
// PeerName associates the trust bundle with a peer.
string PeerName = 2;
// Partition isolates the bundle from other trust bundles in separate partitions.
// Partition isolates the bundle from other trust bundles in separate local partitions.
string Partition = 3;
// RootPEMs holds ASN.1 DER encoded X.509 certificate data for the trust bundle.
repeated string RootPEMs = 4;
// ExportedPartition references the remote partition of the peer
// which sent this trust bundle. Used for generating SpiffeIDs.
string ExportedPartition = 5;
// CreateIndex is the Raft index at which the trust bundle was created.
uint64 CreateIndex = 5;
uint64 CreateIndex = 6;
// ModifyIndex is the latest Raft index at which the trust bundle was modified.
uint64 ModifyIndex = 6;
uint64 ModifyIndex = 7;
}
// @consul-rpc-glue: Datacenter,ReadTODO
@ -273,10 +283,10 @@ message GenerateTokenResponse {
//
// mog annotation:
//
// target=github.com/hashicorp/consul/api.PeeringInitiateRequest
// target=github.com/hashicorp/consul/api.PeeringEstablishRequest
// output=peering.gen.go
// name=API
message InitiateRequest {
message EstablishRequest {
// Name of the remote peer.
string PeerName = 1;
@ -298,10 +308,10 @@ message InitiateRequest {
//
// mog annotation:
//
// target=github.com/hashicorp/consul/api.PeeringInitiateResponse
// target=github.com/hashicorp/consul/api.PeeringEstablishResponse
// output=peering.gen.go
// name=API
message InitiateResponse {}
message EstablishResponse {}
message ReplicationMessage {
oneof Payload {

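Earlier in this file, the new DeletedAt field on Peering carries the mog annotation func-to=TimePtrFromProto func-from=TimePtrToProto. The converters themselves are not part of this diff; the following is only a minimal sketch, assuming they map a protobuf Timestamp to a nullable *time.Time so the field can be omitted from JSON when unset:

package pbpeering

import (
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// TimePtrFromProto converts a protobuf Timestamp into a nullable *time.Time.
// A nil timestamp maps to nil so the JSON field can be omitted when unset.
func TimePtrFromProto(ts *timestamppb.Timestamp) *time.Time {
	if ts == nil {
		return nil
	}
	t := ts.AsTime()
	return &t
}

// TimePtrToProto converts a nullable *time.Time back into a protobuf Timestamp.
func TimePtrToProto(t *time.Time) *timestamppb.Timestamp {
	if t == nil || t.IsZero() {
		return nil
	}
	return timestamppb.New(*t)
}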
View file

@ -311,7 +311,7 @@ func (msg *PeeringTrustBundleDeleteRequest) RequestDatacenter() string {
}
// RequestDatacenter implements structs.RPCInfo
func (msg *InitiateRequest) RequestDatacenter() string {
func (msg *EstablishRequest) RequestDatacenter() string {
if msg == nil {
return ""
}

View file

@ -23,7 +23,7 @@ const _ = grpc.SupportPackageIsVersion7
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type PeeringServiceClient interface {
GenerateToken(ctx context.Context, in *GenerateTokenRequest, opts ...grpc.CallOption) (*GenerateTokenResponse, error)
Initiate(ctx context.Context, in *InitiateRequest, opts ...grpc.CallOption) (*InitiateResponse, error)
Establish(ctx context.Context, in *EstablishRequest, opts ...grpc.CallOption) (*EstablishResponse, error)
PeeringRead(ctx context.Context, in *PeeringReadRequest, opts ...grpc.CallOption) (*PeeringReadResponse, error)
PeeringList(ctx context.Context, in *PeeringListRequest, opts ...grpc.CallOption) (*PeeringListResponse, error)
PeeringDelete(ctx context.Context, in *PeeringDeleteRequest, opts ...grpc.CallOption) (*PeeringDeleteResponse, error)
@ -58,9 +58,9 @@ func (c *peeringServiceClient) GenerateToken(ctx context.Context, in *GenerateTo
return out, nil
}
func (c *peeringServiceClient) Initiate(ctx context.Context, in *InitiateRequest, opts ...grpc.CallOption) (*InitiateResponse, error) {
out := new(InitiateResponse)
err := c.cc.Invoke(ctx, "/peering.PeeringService/Initiate", in, out, opts...)
func (c *peeringServiceClient) Establish(ctx context.Context, in *EstablishRequest, opts ...grpc.CallOption) (*EstablishResponse, error) {
out := new(EstablishResponse)
err := c.cc.Invoke(ctx, "/peering.PeeringService/Establish", in, out, opts...)
if err != nil {
return nil, err
}
@ -157,7 +157,7 @@ func (x *peeringServiceStreamResourcesClient) Recv() (*ReplicationMessage, error
// for forward compatibility
type PeeringServiceServer interface {
GenerateToken(context.Context, *GenerateTokenRequest) (*GenerateTokenResponse, error)
Initiate(context.Context, *InitiateRequest) (*InitiateResponse, error)
Establish(context.Context, *EstablishRequest) (*EstablishResponse, error)
PeeringRead(context.Context, *PeeringReadRequest) (*PeeringReadResponse, error)
PeeringList(context.Context, *PeeringListRequest) (*PeeringListResponse, error)
PeeringDelete(context.Context, *PeeringDeleteRequest) (*PeeringDeleteResponse, error)
@ -182,8 +182,8 @@ type UnimplementedPeeringServiceServer struct {
func (UnimplementedPeeringServiceServer) GenerateToken(context.Context, *GenerateTokenRequest) (*GenerateTokenResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GenerateToken not implemented")
}
func (UnimplementedPeeringServiceServer) Initiate(context.Context, *InitiateRequest) (*InitiateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Initiate not implemented")
func (UnimplementedPeeringServiceServer) Establish(context.Context, *EstablishRequest) (*EstablishResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Establish not implemented")
}
func (UnimplementedPeeringServiceServer) PeeringRead(context.Context, *PeeringReadRequest) (*PeeringReadResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method PeeringRead not implemented")
@ -236,20 +236,20 @@ func _PeeringService_GenerateToken_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler)
}
func _PeeringService_Initiate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InitiateRequest)
func _PeeringService_Establish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EstablishRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PeeringServiceServer).Initiate(ctx, in)
return srv.(PeeringServiceServer).Establish(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/peering.PeeringService/Initiate",
FullMethod: "/peering.PeeringService/Establish",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PeeringServiceServer).Initiate(ctx, req.(*InitiateRequest))
return srv.(PeeringServiceServer).Establish(ctx, req.(*EstablishRequest))
}
return interceptor(ctx, in, info, handler)
}
@ -400,8 +400,8 @@ var PeeringService_ServiceDesc = grpc.ServiceDesc{
Handler: _PeeringService_GenerateToken_Handler,
},
{
MethodName: "Initiate",
Handler: _PeeringService_Initiate_Handler,
MethodName: "Establish",
Handler: _PeeringService_Establish_Handler,
},
{
MethodName: "PeeringRead",

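After the rename, a dialing cluster reaches this RPC through the generated client shown above. The snippet below is a sketch of such a call and is not part of this change: the server address and token value are placeholders, and the NewPeeringServiceClient constructor and the PeeringToken field name are assumed from the usual protoc-gen-go-grpc output rather than shown in this diff.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func main() {
	// Placeholder address for the dialing cluster's gRPC port.
	conn, err := grpc.Dial("127.0.0.1:8502",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pbpeering.NewPeeringServiceClient(conn) // assumed generated constructor

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// PeerName matches the proto field above; PeeringToken is assumed to carry
	// the token produced by GenerateToken on the accepting cluster.
	if _, err := client.Establish(ctx, &pbpeering.EstablishRequest{
		PeerName:     "primary-to-alpha",
		PeeringToken: "<token from GenerateToken>",
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("peering established") // EstablishResponse is currently empty
}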
View file

@ -1,6 +1,8 @@
package pbservice
import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbcommon"
"github.com/hashicorp/consul/types"
@ -42,6 +44,23 @@ func NewMapHeadersFromStructs(t map[string][]string) map[string]*HeaderValue {
return s
}
// CheckServiceNodesToStruct converts the contained CheckServiceNodes to their structs equivalents.
func (s *IndexedCheckServiceNodes) CheckServiceNodesToStruct() ([]structs.CheckServiceNode, error) {
if s == nil {
return nil, nil
}
resp := make([]structs.CheckServiceNode, 0, len(s.Nodes))
for _, pb := range s.Nodes {
instance, err := CheckServiceNodeToStructs(pb)
if err != nil {
return resp, fmt.Errorf("failed to convert instance: %w", err)
}
resp = append(resp, *instance)
}
return resp, nil
}
// TODO: use mog once it supports pointers and slices
func CheckServiceNodeToStructs(s *CheckServiceNode) (*structs.CheckServiceNode, error) {
if s == nil {

View file

@ -1,4 +1,4 @@
FROM alpine:latest
FROM alpine:3.12
RUN apk add --no-cache tcpdump
VOLUME [ "/data" ]

View file

@ -0,0 +1,2 @@
primary_datacenter = "alpha"
log_level = "trace"

View file

@ -0,0 +1,26 @@
config_entries {
bootstrap = [
{
kind = "proxy-defaults"
name = "global"
config {
protocol = "tcp"
}
},
{
kind = "exported-services"
name = "default"
services = [
{
name = "s2"
consumers = [
{
peer_name = "alpha-to-primary"
}
]
}
]
}
]
}

View file

@ -0,0 +1,5 @@
services {
name = "mesh-gateway"
kind = "mesh-gateway"
port = 4432
}

View file

@ -0,0 +1 @@
# We don't want an s1 service in this peer

View file

@ -0,0 +1,7 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {}
}
}

View file

@ -0,0 +1,11 @@
#!/bin/bash
set -euo pipefail
register_services alpha
gen_envoy_bootstrap s2 19002 alpha
gen_envoy_bootstrap mesh-gateway 19003 alpha true
wait_for_config_entry proxy-defaults global alpha
wait_for_config_entry exported-services default alpha

View file

@ -0,0 +1,31 @@
#!/usr/bin/env bats
load helpers
@test "s2 proxy is running correct version" {
assert_envoy_version 19002
}
@test "s2 proxy admin is up on :19002" {
retry_default curl -f -s localhost:19002/stats -o /dev/null
}
@test "gateway-alpha proxy admin is up on :19003" {
retry_default curl -f -s localhost:19003/stats -o /dev/null
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s2 alpha
}
@test "s2 proxy should be healthy" {
assert_service_has_healthy_instances s2 1 alpha
}
@test "gateway-alpha should be up and listening" {
retry_long nc -z consul-alpha:4432
}
@test "s2 proxies should be healthy" {
assert_service_has_healthy_instances s2 1 alpha
}

View file

@ -0,0 +1,2 @@
bind_addr = "0.0.0.0"
advertise_addr = "{{ GetInterfaceIP \"eth0\" }}"

View file

@ -0,0 +1,6 @@
#!/bin/bash
snapshot_envoy_admin localhost:19000 s1 primary || true
snapshot_envoy_admin localhost:19001 mesh-gateway primary || true
snapshot_envoy_admin localhost:19002 s2 alpha || true
snapshot_envoy_admin localhost:19003 mesh-gateway alpha || true

View file

@ -0,0 +1,12 @@
config_entries {
bootstrap = [
{
kind = "proxy-defaults"
name = "global"
config {
protocol = "tcp"
}
}
]
}

View file

@ -0,0 +1,5 @@
services {
name = "mesh-gateway"
kind = "mesh-gateway"
port = 4431
}

View file

@ -0,0 +1,17 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
destination_peer = "primary-to-alpha"
local_bind_port = 5000
}
]
}
}
}
}

View file

@ -0,0 +1 @@
# We don't want an s2 service in the primary dc

View file

@ -0,0 +1,10 @@
#!/bin/bash
set -euo pipefail
register_services primary
gen_envoy_bootstrap s1 19000 primary
gen_envoy_bootstrap mesh-gateway 19001 primary true
wait_for_config_entry proxy-defaults global

View file

@ -0,0 +1,57 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy is running correct version" {
assert_envoy_version 19000
}
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "gateway-primary proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxies should be healthy in alpha" {
assert_service_has_healthy_instances s2 1 alpha
}
@test "gateway-primary should be up and listening" {
retry_long nc -z consul-primary:4431
}
@test "gateway-alpha should be up and listening" {
retry_long nc -z consul-alpha:4432
}
@test "peer the two clusters together" {
create_peering primary alpha
}
@test "s2 alpha proxies should be healthy in primary" {
assert_service_has_healthy_instances s2 1 primary "" "" primary-to-alpha
}
@test "gateway-alpha should have healthy endpoints for s2" {
assert_upstream_has_endpoints_in_status consul-alpha:19003 s2.default.alpha HEALTHY 1
}
@test "s1 upstream should have healthy endpoints for s2" {
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.default.alpha-to-primary.external HEALTHY 1
}
@test "s1 upstream should be able to connect to s2" {
run retry_default curl -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}
@test "s1 upstream made 1 connection to s2" {
assert_envoy_metric_at_least 127.0.0.1:19000 "cluster.s2.default.default.alpha-to-primary.external.*cx_total" 1
}

View file

@ -0,0 +1,4 @@
#!/bin/bash
export REQUIRED_SERVICES="s1 s1-sidecar-proxy gateway-primary s2-alpha s2-sidecar-proxy-alpha gateway-alpha tcpdump-primary tcpdump-alpha"
export REQUIRE_PEERS=1

View file

@ -3,4 +3,5 @@
export DEFAULT_REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
export REQUIRED_SERVICES="${DEFAULT_REQUIRED_SERVICES}"
export REQUIRE_SECONDARY=0
export REQUIRE_PARTITIONS=0
export REQUIRE_PARTITIONS=0
export REQUIRE_PEERS=0

View file

@ -357,7 +357,6 @@ function get_upstream_endpoint_in_status_count {
local HEALTH_STATUS=$3
run curl -s -f "http://${HOSTPORT}/clusters?format=json"
[ "$status" -eq 0 ]
# echo "$output" >&3
echo "$output" | jq --raw-output "
.cluster_statuses[]
| select(.name|startswith(\"${CLUSTER_NAME}\"))
@ -477,8 +476,11 @@ function get_healthy_service_count {
local SERVICE_NAME=$1
local DC=$2
local NS=$3
local AP=$4
local PEER_NAME=$5
run curl -s -f ${HEADERS} "consul-${DC}:8500/v1/health/connect/${SERVICE_NAME}?passing&ns=${NS}&partition=${AP}&peer=${PEER_NAME}"
run curl -s -f ${HEADERS} "127.0.0.1:8500/v1/health/connect/${SERVICE_NAME}?dc=${DC}&passing&ns=${NS}"
[ "$status" -eq 0 ]
echo "$output" | jq --raw-output '. | length'
}
@ -508,9 +510,11 @@ function assert_service_has_healthy_instances_once {
local SERVICE_NAME=$1
local EXPECT_COUNT=$2
local DC=${3:-primary}
local NS=$4
local NS=${4:-}
local AP=${5:-}
local PEER_NAME=${6:-}
GOT_COUNT=$(get_healthy_service_count "$SERVICE_NAME" "$DC" "$NS")
GOT_COUNT=$(get_healthy_service_count "$SERVICE_NAME" "$DC" "$NS" "$AP" "$PEER_NAME")
[ "$GOT_COUNT" -eq $EXPECT_COUNT ]
}
@ -519,9 +523,11 @@ function assert_service_has_healthy_instances {
local SERVICE_NAME=$1
local EXPECT_COUNT=$2
local DC=${3:-primary}
local NS=$4
local NS=${4:-}
local AP=${5:-}
local PEER_NAME=${6:-}
run retry_long assert_service_has_healthy_instances_once "$SERVICE_NAME" "$EXPECT_COUNT" "$DC" "$NS"
run retry_long assert_service_has_healthy_instances_once "$SERVICE_NAME" "$EXPECT_COUNT" "$DC" "$NS" "$AP" "$PEER_NAME"
[ "$status" -eq 0 ]
}
@ -941,3 +947,20 @@ function assert_expected_fortio_host_header {
return 1
fi
}
function create_peering {
local GENERATE_PEER=$1
local ESTABLISH_PEER=$2
run curl -sL -XPOST "http://consul-${GENERATE_PEER}:8500/v1/peering/token" -d"{ \"PeerName\" : \"${GENERATE_PEER}-to-${ESTABLISH_PEER}\" }"
# echo "$output" >&3
[ "$status" == 0 ]
local token
token="$(echo "$output" | jq -r .PeeringToken)"
[ -n "$token" ]
run curl -sLv -XPOST "http://consul-${ESTABLISH_PEER}:8500/v1/peering/establish" -d"{ \"PeerName\" : \"${ESTABLISH_PEER}-to-${GENERATE_PEER}\", \"PeeringToken\" : \"${token}\" }"
# echo "$output" >&3
[ "$status" == 0 ]
}
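
create_peering drives the same handshake the agents expose over HTTP: POST /v1/peering/token on the generating cluster, then POST /v1/peering/establish on the other side with the returned PeeringToken. For reference, a rough Go equivalent of the helper is sketched below; the agent addresses and any response shape beyond the PeeringToken field are assumptions, not part of this diff.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// peerClusters mirrors the create_peering bats helper: generate a token on one
// cluster, then establish the peering on the other cluster using that token.
func peerClusters(generateAddr, establishAddr, generateName, establishName string) error {
	// Step 1: ask the "generating" cluster for a peering token.
	body, _ := json.Marshal(map[string]string{"PeerName": generateName})
	resp, err := http.Post(generateAddr+"/v1/peering/token", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var tokenResp struct {
		PeeringToken string
	}
	if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
		return err
	}
	if tokenResp.PeeringToken == "" {
		return fmt.Errorf("no PeeringToken in response")
	}

	// Step 2: hand the token to the other cluster to establish the peering.
	body, _ = json.Marshal(map[string]string{
		"PeerName":     establishName,
		"PeeringToken": tokenResp.PeeringToken,
	})
	resp, err = http.Post(establishAddr+"/v1/peering/establish", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("establish returned %s", resp.Status)
	}
	return nil
}

func main() {
	if err := peerClusters("http://consul-primary:8500", "http://consul-alpha:8500",
		"primary-to-alpha", "alpha-to-primary"); err != nil {
		log.Fatal(err)
	}
}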

View file

@ -10,7 +10,7 @@ readonly HASHICORP_DOCKER_PROXY="docker.mirror.hashicorp.services"
DEBUG=${DEBUG:-}
# ENVOY_VERSION to run each test against
ENVOY_VERSION=${ENVOY_VERSION:-"1.22.0"}
ENVOY_VERSION=${ENVOY_VERSION:-"1.22.2"}
export ENVOY_VERSION
export DOCKER_BUILDKIT=1
@ -127,13 +127,21 @@ function start_consul {
'-p=9411:9411'
'-p=16686:16686'
)
if [[ $DC == 'secondary' ]]; then
case "$DC" in
secondary)
ports=(
'-p=9500:8500'
'-p=9502:8502'
)
fi
;;
alpha)
ports=(
'-p=9510:8500'
'-p=9512:8502'
)
;;
esac
license="${CONSUL_LICENSE:-}"
# load the consul license so we can pass it into the consul
# containers as an env var in the case that this is a consul
@ -269,7 +277,10 @@ function capture_logs {
then
services="$services consul-ap1"
fi
if is_set $REQUIRE_PEERS
then
services="$services consul-alpha"
fi
if [ -f "${CASE_DIR}/capture.sh" ]
then
@ -289,7 +300,7 @@ function stop_services {
# Teardown
docker_kill_rm $REQUIRED_SERVICES
docker_kill_rm consul-primary consul-secondary consul-ap1
docker_kill_rm consul-primary consul-secondary consul-ap1 consul-alpha
}
function init_vars {
@ -332,6 +343,10 @@ function run_tests {
then
init_workdir ap1
fi
if is_set $REQUIRE_PEERS
then
init_workdir alpha
fi
global_setup
@ -357,6 +372,9 @@ function run_tests {
docker_consul "primary" consul partition create -name ap1 > /dev/null
start_partitioned_client ap1
fi
if is_set $REQUIRE_PEERS; then
start_consul alpha
fi
echo "Setting up the primary datacenter"
pre_service_setup primary
@ -369,6 +387,10 @@ function run_tests {
echo "Setting up the non-default partition"
pre_service_setup ap1
fi
if is_set $REQUIRE_PEERS; then
echo "Setting up the alpha peer"
pre_service_setup alpha
fi
echo "Starting services"
start_services
@ -381,6 +403,10 @@ function run_tests {
echo "Verifying the secondary datacenter"
verify secondary
fi
if is_set $REQUIRE_PEERS; then
echo "Verifying the alpha peer"
verify alpha
fi
}
function test_teardown {
@ -435,13 +461,13 @@ function suite_setup {
}
function suite_teardown {
docker_kill_rm verify-primary verify-secondary
docker_kill_rm verify-primary verify-secondary verify-alpha
# this is some hilarious magic
docker_kill_rm $(grep "^function run_container_" $self_name | \
sed 's/^function run_container_\(.*\) {/\1/g')
docker_kill_rm consul-primary consul-secondary consul-ap1
docker_kill_rm consul-primary consul-secondary consul-ap1 consul-alpha
if docker network inspect envoy-tests &>/dev/null ; then
echo -n "Deleting network 'envoy-tests'..."
@ -530,6 +556,14 @@ function run_container_s3-ap1 {
common_run_container_service s3 ap1 8580 8579
}
function run_container_s1-alpha {
common_run_container_service s1-alpha alpha 8080 8079
}
function run_container_s2-alpha {
common_run_container_service s2-alpha alpha 8181 8179
}
function common_run_container_sidecar_proxy {
local service="$1"
local CLUSTER="$2"
@ -544,7 +578,7 @@ function common_run_container_sidecar_proxy {
"${HASHICORP_DOCKER_PROXY}/envoyproxy/envoy:v${ENVOY_VERSION}" \
envoy \
-c /workdir/${CLUSTER}/envoy/${service}-bootstrap.json \
-l debug \
-l trace \
--disable-hot-restart \
--drain-time-s 1 >/dev/null
}
@ -564,7 +598,7 @@ function run_container_s1-sidecar-proxy-consul-exec {
consul connect envoy -sidecar-for s1 \
-envoy-version ${ENVOY_VERSION} \
-- \
-l debug >/dev/null
-l trace >/dev/null
}
function run_container_s2-sidecar-proxy {
@ -606,6 +640,13 @@ function run_container_s3-ap1-sidecar-proxy {
common_run_container_sidecar_proxy s3 ap1
}
function run_container_s1-sidecar-proxy-alpha {
common_run_container_sidecar_proxy s1 alpha
}
function run_container_s2-sidecar-proxy-alpha {
common_run_container_sidecar_proxy s2 alpha
}
function common_run_container_gateway {
local name="$1"
local DC="$2"
@ -620,7 +661,7 @@ function common_run_container_gateway {
"${HASHICORP_DOCKER_PROXY}/envoyproxy/envoy:v${ENVOY_VERSION}" \
envoy \
-c /workdir/${DC}/envoy/${name}-bootstrap.json \
-l debug \
-l trace \
--disable-hot-restart \
--drain-time-s 1 >/dev/null
}
@ -631,6 +672,9 @@ function run_container_gateway-primary {
function run_container_gateway-secondary {
common_run_container_gateway mesh-gateway secondary
}
function run_container_gateway-alpha {
common_run_container_gateway mesh-gateway alpha
}
function run_container_ingress-gateway-primary {
common_run_container_gateway ingress-gateway primary
@ -699,6 +743,10 @@ function run_container_tcpdump-secondary {
# To use add "tcpdump-secondary" to REQUIRED_SERVICES
common_run_container_tcpdump secondary
}
function run_container_tcpdump-alpha {
# To use add "tcpdump-alpha" to REQUIRED_SERVICES
common_run_container_tcpdump alpha
}
function common_run_container_tcpdump {
local DC="$1"

View file

@ -11,7 +11,7 @@
"scripts": {
"doc:toc": "doctoc README.md",
"compliance": "npm-run-all compliance:*",
"compliance:licenses": "license-checker --summary --onlyAllow 'Python-2.0;Apache*;Apache License, Version 2.0;Apache-2.0;Apache 2.0;Artistic-2.0;BSD;BSD-3-Clause;CC-BY-3.0;CC-BY-4.0;CC0-1.0;ISC;MIT;MPL-2.0;Public Domain;Unicode-TOU;Unlicense;WTFPL' --excludePackages 'consul-ui@2.2.0;consul-acls@0.1.0;consul-lock-sessions@0.1.0;consul-partitions@0.1.0;consul-nspaces@0.1.0'"
"compliance:licenses": "license-checker --summary --onlyAllow 'Python-2.0;Apache*;Apache License, Version 2.0;Apache-2.0;Apache 2.0;Artistic-2.0;BSD;BSD-3-Clause;CC-BY-3.0;CC-BY-4.0;CC0-1.0;ISC;MIT;MPL-2.0;Public Domain;Unicode-TOU;Unlicense;WTFPL' --excludePackages 'consul-ui@2.2.0;consul-acls@0.1.0;consul-lock-sessions@0.1.0;consul-partitions@0.1.0;consul-nspaces@0.1.0;consul-hcp@0.1.0'"
},
"devDependencies": {

View file

@ -0,0 +1,5 @@
{
"name": "consul-hcp",
"version": "0.1.0",
"private": true
}

View file

@ -0,0 +1,11 @@
(routes => routes({
dc: {
show: {
license: null,
},
},
}))(
(json, data = (typeof document !== 'undefined' ? document.currentScript.dataset : module.exports)) => {
data[`routes`] = JSON.stringify(json);
}
);

View file

@ -0,0 +1,7 @@
(services => services({
}))(
(json, data = (typeof document !== 'undefined' ? document.currentScript.dataset : module.exports)) => {
data[`services`] = JSON.stringify(json);
}
);

Some files were not shown because too many files have changed in this diff